Nov 26 22:21:10 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 22:21:10 crc restorecon[4693]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 22:21:10 crc restorecon[4693]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 22:21:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 22:21:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:10 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 22:21:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 22:21:11 crc restorecon[4693]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 22:21:11 crc kubenswrapper[4903]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.772813 4903 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.781951 4903 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782003 4903 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782014 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782022 4903 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782031 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782039 4903 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782049 4903 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782060 4903 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782073 4903 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782081 4903 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782090 4903 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782098 4903 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782107 4903 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782115 4903 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782123 4903 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782132 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782141 4903 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782150 4903 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782157 4903 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782166 4903 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782175 4903 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782183 4903 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782192 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782200 4903 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782209 4903 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782216 4903 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782225 4903 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782234 4903 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782244 4903 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782253 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782262 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782271 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782282 4903 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782298 4903 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782308 4903 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782316 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782325 4903 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782334 4903 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782342 4903 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782351 4903 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782360 4903 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782368 4903 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782379 4903 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782388 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782396 4903 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782405 4903 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782413 4903 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782422 4903 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782431 4903 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782440 4903 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782448 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782457 4903 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782465 4903 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782474 4903 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782482 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782490 4903 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782497 4903 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782505 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782514 4903 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782522 4903 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782531 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782547 4903 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782555 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782563 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782572 4903 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782580 4903 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782587 4903 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782595 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782602 4903 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782610 4903 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.782620 4903 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783846 4903 flags.go:64] FLAG: --address="0.0.0.0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783872 4903 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783888 4903 flags.go:64] FLAG: --anonymous-auth="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783900 4903 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783913 4903 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783922 4903 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783933 4903 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783944 4903 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783954 4903 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783963 4903 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783972 4903 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.783996 4903 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784005 4903 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784014 4903 flags.go:64] FLAG: --cgroup-root=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784023 4903 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784032 4903 flags.go:64] FLAG: --client-ca-file=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784040 4903 flags.go:64] FLAG: --cloud-config=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784049 4903 flags.go:64] FLAG: --cloud-provider=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784058 4903 flags.go:64] FLAG: --cluster-dns="[]"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784070 4903 flags.go:64] FLAG: --cluster-domain=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784079 4903 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784088 4903 flags.go:64] FLAG: --config-dir=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784097 4903 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784106 4903 flags.go:64] FLAG: --container-log-max-files="5"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784119 4903 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784128 4903 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784138 4903 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784150 4903 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784160 4903 flags.go:64] FLAG: --contention-profiling="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784169 4903 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784178 4903 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784187 4903 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784196 4903 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784208 4903 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784216 4903 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784226 4903 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784235 4903 flags.go:64] FLAG: --enable-load-reader="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784244 4903 flags.go:64] FLAG: --enable-server="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784253 4903 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784263 4903 flags.go:64] FLAG: --event-burst="100"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784273 4903 flags.go:64] FLAG: --event-qps="50"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784282 4903 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784290 4903 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784299 4903 flags.go:64] FLAG: --eviction-hard=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784310 4903 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784319 4903 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784328 4903 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784338 4903 flags.go:64] FLAG: --eviction-soft=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784347 4903 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784356 4903 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784365 4903 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784373 4903 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784383 4903 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784392 4903 flags.go:64] FLAG: --fail-swap-on="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784401 4903 flags.go:64] FLAG: --feature-gates=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784412 4903 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784421 4903 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784431 4903 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784440 4903 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784449 4903 flags.go:64] FLAG: --healthz-port="10248"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784458 4903 flags.go:64] FLAG: --help="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784467 4903 flags.go:64] FLAG: --hostname-override=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784475 4903 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784484 4903 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784494 4903 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784504 4903 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784513 4903 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784522 4903 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784531 4903 flags.go:64] FLAG: --image-service-endpoint=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784539 4903 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784548 4903 flags.go:64] FLAG: --kube-api-burst="100"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784557 4903 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784566 4903 flags.go:64] FLAG: --kube-api-qps="50"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784575 4903 flags.go:64] FLAG: --kube-reserved=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784584 4903 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784592 4903 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784601 4903 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784610 4903 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784619 4903 flags.go:64] FLAG: --lock-file=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784627 4903 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784636 4903 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784645 4903 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784659 4903 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784675 4903 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784685 4903 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784720 4903 flags.go:64] FLAG: --logging-format="text"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784729 4903 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784739 4903 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784748 4903 flags.go:64] FLAG: --manifest-url=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784757 4903 flags.go:64] FLAG: --manifest-url-header=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784769 4903 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784779 4903 flags.go:64] FLAG: --max-open-files="1000000"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784790 4903 flags.go:64] FLAG: --max-pods="110"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784799 4903 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784808 4903 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784817 4903 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784826 4903 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784835 4903 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784846 4903 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784855 4903 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784888 4903 flags.go:64] FLAG: --node-status-max-images="50"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784898 4903 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784907 4903 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784917 4903 flags.go:64] FLAG: --pod-cidr=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784926 4903 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784939 4903 flags.go:64] FLAG: --pod-manifest-path=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784948 4903 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784957 4903 flags.go:64] FLAG: --pods-per-core="0"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784966 4903 flags.go:64] FLAG: --port="10250"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784975 4903 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784984 4903 flags.go:64] FLAG: --provider-id=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.784993 4903 flags.go:64] FLAG: --qos-reserved=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785002 4903 flags.go:64] FLAG: --read-only-port="10255"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785011 4903 flags.go:64] FLAG: --register-node="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785020 4903 flags.go:64] FLAG: --register-schedulable="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785028 4903 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785043 4903 flags.go:64] FLAG: --registry-burst="10"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785051 4903 flags.go:64] FLAG: --registry-qps="5"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785060 4903 flags.go:64] FLAG: --reserved-cpus=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785079 4903 flags.go:64] FLAG: --reserved-memory=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785091 4903 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785100 4903 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785109 4903 flags.go:64] FLAG: --rotate-certificates="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785120 4903 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785130 4903 flags.go:64] FLAG: --runonce="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785139 4903 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785149 4903 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785159 4903 flags.go:64] FLAG: --seccomp-default="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785169 4903 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785177 4903 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785187 4903 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785196 4903 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785205 4903 flags.go:64] FLAG: --storage-driver-password="root"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785214 4903 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785223 4903 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785232 4903 flags.go:64] FLAG: --storage-driver-user="root"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785241 4903 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785250 4903 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785259 4903 flags.go:64] FLAG: --system-cgroups=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785268 4903 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785282 4903 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785291 4903 flags.go:64] FLAG: --tls-cert-file=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785299 4903 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785311 4903 flags.go:64] FLAG: --tls-min-version=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785320 4903 flags.go:64] FLAG: --tls-private-key-file=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785329 4903 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785337 4903 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785346 4903 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785356 4903 flags.go:64] FLAG: --v="2"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785373 4903 flags.go:64] FLAG: --version="false"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785385 4903 flags.go:64] FLAG: --vmodule=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785395 4903 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.785405 4903 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785621 4903 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785632 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785643 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785652 4903 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785660 4903 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785669 4903 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785678 4903 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785686 4903 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785719 4903 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785728 4903 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785737 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785745 4903 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785754 4903 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785762 4903 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785770 4903 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785777 4903 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785785 4903 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785793 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785801 4903 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785808 4903 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785816 4903 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785824 4903 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785833 4903 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785840 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785848 4903 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785855 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785863 4903 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785871 4903 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785879 4903 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785887 4903 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785894 4903 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785909 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785916 4903 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785924 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785932 4903 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785939 4903 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785947 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785954 4903 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785967 4903 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785975 4903 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785983 4903 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.785991 4903 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786001 4903 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786012 4903 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786020 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786028 4903 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786036 4903 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786044 4903 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786053 4903 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786060 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786068 4903 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786076 4903 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786085 4903 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786092 4903 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786100 4903 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786110 4903 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
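
The flags.go:64 records above dump every kubelet flag as FLAG: --name="value". A minimal Python sketch (not part of this log; filename an assumption) that turns that dump into a dict, e.g. to diff the effective flags between two starts:

#!/usr/bin/env python3
"""Parse the kubelet's startup FLAG dump into a dict."""
import re

PAT = re.compile(r'flags\.go:\d+\] FLAG: (--[\w-]+)="(.*?)"')

flags = {}
with open("kubelet.log") as fh:       # filename is an assumption
    for line in fh:
        for name, value in PAT.findall(line):
            flags[name] = value

print(flags.get("--node-ip"))                      # 192.168.126.11 in this log
print(flags.get("--container-runtime-endpoint"))   # /var/run/crio/crio.sock
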
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786119 4903 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786127 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786134 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786142 4903 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786150 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786160 4903 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786170 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786181 4903 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786190 4903 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786199 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786208 4903 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786216 4903 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786224 4903 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786233 4903 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.786242 4903 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.786255 4903 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.800951 4903 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.801349 4903 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801501 4903 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801516 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801527 4903 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801537 4903 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801545 4903 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801555 4903 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801567 4903 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801579 4903 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801588 4903 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801597 4903 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801605 4903 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801612 4903 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801620 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801628 4903 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801637 4903 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801644 4903 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801652 4903 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801660 4903 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801668 4903 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801676 4903 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801684 4903 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801725 4903 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801734 4903 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801742 4903 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801750 4903 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801758 4903 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801765 4903 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801773 4903 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801781 4903 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801894 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801903 4903 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801911 4903 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801918 4903 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801926 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801937 4903 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801950 4903 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801959 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801968 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801977 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801986 4903 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.801994 4903 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802002 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802011 4903 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802019 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802027 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802034 4903 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802042 4903 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802050 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802059 4903 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802067 4903 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802074 4903 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802082 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802089 4903 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802097 4903 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802104 4903 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802115 4903 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802125 4903 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802135 4903 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802144 4903 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802153 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802162 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802170 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802179 4903 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802187 4903 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802196 4903 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802205 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802212 4903 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802220 4903 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802227 4903 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802235 4903 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802244 4903 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.802257 4903 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802536 4903 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802552 4903 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802561 4903 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802570 4903 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802578 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802590 4903 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802598 4903 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802606 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802615 4903 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802628 4903 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802673 4903 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802684 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802715 4903 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802724 4903 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802732 4903 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802740 4903 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802747 4903 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802756 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802764 4903 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802772 4903 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802780 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802788 4903 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802795 4903 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802803 4903 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802811 4903 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802819 4903 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802827 4903 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802834 4903 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802842 4903 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802850 4903 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802858 4903 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802865 4903 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802873 4903 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802881 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802894 4903 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802906 4903 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802915 4903 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802923 4903 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802931 4903 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802940 4903 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802948 4903 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802957 4903 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802965 4903 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802972 4903 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802980 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802988 4903 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.802996 4903 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803003 4903 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803011 4903 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803018 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803026 4903 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803034 4903 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803041 4903 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803049 4903 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803057 4903 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803064 4903 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803073 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803081 4903 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803089 4903 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803096 4903 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803104 4903 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803112 4903 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803119 4903 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803127 4903 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803134 4903 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803142 4903 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803152 4903 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803160 4903 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803169 4903 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803177 4903 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.803185 4903 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.803197 4903 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.803436 4903 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.809490 4903 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.809670 4903 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
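
By this point the same set of "unrecognized feature gate" warnings has been emitted several times, once per feature-gate parse pass during startup. A minimal Python sketch (not part of this log; filename an assumption) to confirm the repeated blocks are the same set:

#!/usr/bin/env python3
"""Count how often each "unrecognized feature gate" warning repeats."""
import re
from collections import Counter

PAT = re.compile(r"unrecognized feature gate: (\w+)")

counts = Counter()
with open("kubelet.log") as fh:       # filename is an assumption
    for line in fh:
        counts.update(PAT.findall(line))

# Each gate should appear the same number of times: once per parse pass.
for gate, n in counts.most_common():
    print(f"{n}\t{gate}")
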
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.812085 4903 server.go:997] "Starting client certificate rotation"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.812141 4903 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.813358 4903 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-10 15:25:42.870882989 +0000 UTC
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.813565 4903 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1073h4m31.057323746s for next certificate rotation
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.840429 4903 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.843202 4903 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.863816 4903 log.go:25] "Validated CRI v1 runtime API"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.902320 4903 log.go:25] "Validated CRI v1 image API"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.904217 4903 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.910045 4903 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-22-16-24-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.910078 4903 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.938278 4903 manager.go:217] Machine: {Timestamp:2025-11-26 22:21:11.934070646 +0000 UTC m=+0.624305626 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4bd090e0-3377-4de9-8ab0-9d1eda387d4f BootID:7d47a3d9-b3b7-4680-8967-b1b51d436e50 Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:af:63:17 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:af:63:17 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:be:1d:55 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ee:82:0f Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:cb:bb:f5 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:18:0c:62 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:12:b3:7f:2f:13:de Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:ae:b8:23:5c:3b:a8 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.938877 4903 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.939197 4903 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.939843 4903 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.940276 4903 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.940348 4903 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.940827 4903 topology_manager.go:138] "Creating topology manager with none policy"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.940850 4903 container_manager_linux.go:303] "Creating device plugin manager"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.941465 4903 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.941522 4903 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.942058 4903 state_mem.go:36] "Initialized new in-memory state store"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.942220 4903 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.945868 4903 kubelet.go:418] "Attempting to sync node with API server"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.945919 4903 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.945976 4903 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.946010 4903 kubelet.go:324] "Adding apiserver pod source"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.946040 4903 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.952302 4903 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.952924 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.952930 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.953390 4903 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.953793 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.953808 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.955038 4903 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.956946 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.956994 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957012 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957027 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957050 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957068 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957083 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957107 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957123 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957139 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957191 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.957207 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.958370 4903 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.959519 4903 server.go:1280] "Started kubelet"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.959744 4903 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.959954 4903 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.959606 4903 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.961183 4903 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 26 22:21:11 crc systemd[1]: Started Kubernetes Kubelet.
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.963727 4903 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.963742 4903 server.go:460] "Adding debug handlers to kubelet server"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.963770 4903 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.963823 4903 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 13:45:39.532127693 +0000 UTC
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.963984 4903 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 447h24m27.568152099s for next certificate rotation
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.964127 4903 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.964610 4903 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.964641 4903 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.964633 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="200ms"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.964807 4903 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 26 22:21:11 crc kubenswrapper[4903]: W1126 22:21:11.965588 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.965688 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.975564 4903 factory.go:55] Registering systemd factory
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.975619 4903 factory.go:221] Registration of the systemd container factory successfully
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.976234 4903 factory.go:153] Registering CRI-O factory
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.976264 4903 factory.go:221] Registration of the crio container factory successfully
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.976383 4903 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.976437 4903 factory.go:103] Registering Raw factory
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.976467 4903 manager.go:1196] Started watching for new ooms in manager
Nov 26 22:21:11 crc kubenswrapper[4903]: E1126 22:21:11.975776 4903 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.219:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187bae9dd56caa2f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 22:21:11.959448111 +0000 UTC m=+0.649683091,LastTimestamp:2025-11-26 22:21:11.959448111 +0000 UTC m=+0.649683091,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.978068 4903 manager.go:319] Starting recovery of all containers
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981085 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981141 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981152 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981161 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981171 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981179 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981189 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981198 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981208 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981217 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981227 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981236 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981270 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981281 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981294 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981305 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981338 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981351 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981362 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981372 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981386 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981398 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981438 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981451 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981463 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981476 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981490 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981503 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981515 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981526 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981536 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981551 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981564 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981575 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981587 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981599 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981611 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981624 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981637 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981649 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981663 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981675 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981701 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981715 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981727 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981738 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981749 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981762 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981775 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981788 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981799 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981810 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981825 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981839 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981852 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981863 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981875 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981887 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981902 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981913 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981927 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981940 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981952 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981963 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981976 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.981988 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982000 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982011 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982022 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982032 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982043 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982054 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982067 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982079 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982088 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982099 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982109 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982119 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982129 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982144 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982154 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982164 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982175 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982184 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982193 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982205 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982216 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982225 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982235 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982245 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982255 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982264 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.982274 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984082 4903 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984112 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984126 4903 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984140 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984176 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984189 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984203 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984216 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984228 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984240 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984253 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984264 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984286 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984298 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984309 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984318 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984332 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984343 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984355 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984368 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984381 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984392 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984403 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984430 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984442 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984452 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984463 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984473 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984484 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984494 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984504 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984514 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984524 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984534 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984545 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984557 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984568 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984581 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984594 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984605 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984618 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984630 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984641 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984651 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984661 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984671 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984681 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984705 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984716 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984725 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984734 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984747 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984757 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984766 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984775 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984784 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984794 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984805 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984815 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984824 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984834 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984844 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984857 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984867 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984877 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984887 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984906 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984917 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984927 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984936 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984944 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984955 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984965 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984973 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.984991 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985001 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985012 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985022 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985031 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985040 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985048 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985057 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985066 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985075 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985085 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985093 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985103 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985112 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985121 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985132 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985152 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985169 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985228 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985242 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985256 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985268 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985279 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985291 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985302 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985315 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985327 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985338 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985351 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985363 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985375 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985387 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985398 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985408 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985418 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985427 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985436 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985446 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985454 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985465 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985476 4903 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985489 4903 reconstruct.go:97] "Volume reconstruction finished" Nov 26 22:21:11 crc kubenswrapper[4903]: I1126 22:21:11.985497 4903 reconciler.go:26] "Reconciler: start to sync state" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.011449 4903 manager.go:324] Recovery completed Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.022637 4903 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.027073 4903 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.027148 4903 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.027182 4903 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.027244 4903 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.030085 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: W1126 22:21:12.031123 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.031200 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.032191 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.032220 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.032230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.033127 4903 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.033271 4903 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 22:21:12 crc 
kubenswrapper[4903]: I1126 22:21:12.033408 4903 state_mem.go:36] "Initialized new in-memory state store" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.048881 4903 policy_none.go:49] "None policy: Start" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.050039 4903 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.050067 4903 state_mem.go:35] "Initializing new in-memory state store" Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.064553 4903 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.124772 4903 manager.go:334] "Starting Device Plugin manager" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.124855 4903 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.124878 4903 server.go:79] "Starting device plugin registration server" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.125729 4903 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.125760 4903 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.126035 4903 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.126174 4903 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.126185 4903 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.128207 4903 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.128340 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.131167 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.131392 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.131412 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.131593 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.132562 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.132626 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.134914 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.134975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.134932 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.135014 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.135034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.134986 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.135267 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.135389 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.135435 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137281 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137299 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137496 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137714 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.137782 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138672 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138715 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138871 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138917 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.138921 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139049 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139089 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139586 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139628 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139639 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.139990 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140007 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140269 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140290 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140304 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140489 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.140523 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.141570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.141631 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.141655 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.143802 4903 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.165783 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="400ms" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.187923 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188001 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188055 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188111 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188167 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188211 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 
22:21:12.188254 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188294 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188339 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188426 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188527 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188610 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188658 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.188760 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: 
I1126 22:21:12.225949 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.227338 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.227388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.227407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.227439 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.228097 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.219:6443: connect: connection refused" node="crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290187 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290246 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290285 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290329 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290360 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290390 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290405 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" 
(UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290437 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290501 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290415 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290454 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290421 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290597 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290626 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290649 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290486 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290681 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290509 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290737 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290830 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290829 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290865 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290900 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290921 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290964 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.290987 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.291100 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.291134 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.291197 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.428639 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.430381 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.430414 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.430428 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.430452 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.430924 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.219:6443: connect: connection refused" node="crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.485008 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.498719 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.517075 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.539834 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: W1126 22:21:12.541218 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-7b1fad241d1ada0478db87375b3971ee7fdb237aac520f548967e40d9e149d99 WatchSource:0}: Error finding container 7b1fad241d1ada0478db87375b3971ee7fdb237aac520f548967e40d9e149d99: Status 404 returned error can't find the container with id 7b1fad241d1ada0478db87375b3971ee7fdb237aac520f548967e40d9e149d99
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.550496 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: W1126 22:21:12.556419 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-963ef3dbfcb3f0fd94817bbfee6f3995e5a3d9b7ae302378400af3445fe10ff1 WatchSource:0}: Error finding container 963ef3dbfcb3f0fd94817bbfee6f3995e5a3d9b7ae302378400af3445fe10ff1: Status 404 returned error can't find the container with id 963ef3dbfcb3f0fd94817bbfee6f3995e5a3d9b7ae302378400af3445fe10ff1
Nov 26 22:21:12 crc kubenswrapper[4903]: W1126 22:21:12.561555 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-12fbb07a97b0f0dd0c4d4bf2b70e4f848f31ab0c66493c8fc5af2ebc90e01a32 WatchSource:0}: Error finding container 12fbb07a97b0f0dd0c4d4bf2b70e4f848f31ab0c66493c8fc5af2ebc90e01a32: Status 404 returned error can't find the container with id 12fbb07a97b0f0dd0c4d4bf2b70e4f848f31ab0c66493c8fc5af2ebc90e01a32
Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.566421 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="800ms"
Nov 26 22:21:12 crc kubenswrapper[4903]: W1126 22:21:12.586587 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-02d87702ae1e4c9c342d941fdc791d55ffff6dda8c3f2612560d7e0a0210dbf9 WatchSource:0}: Error finding container 02d87702ae1e4c9c342d941fdc791d55ffff6dda8c3f2612560d7e0a0210dbf9: Status 404 returned error can't find the container with id 02d87702ae1e4c9c342d941fdc791d55ffff6dda8c3f2612560d7e0a0210dbf9
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.831146 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.833023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.833076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.833089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.833120 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: E1126 22:21:12.833722 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.219:6443: connect: connection refused" node="crc"
Nov 26 22:21:12 crc kubenswrapper[4903]: I1126 22:21:12.961304 4903 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.032865 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"12fbb07a97b0f0dd0c4d4bf2b70e4f848f31ab0c66493c8fc5af2ebc90e01a32"}
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.034275 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"963ef3dbfcb3f0fd94817bbfee6f3995e5a3d9b7ae302378400af3445fe10ff1"}
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.035637 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7b1fad241d1ada0478db87375b3971ee7fdb237aac520f548967e40d9e149d99"}
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.036887 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"597bbce283aae496500e70a665fce8d5ecf8bcdbd1140afb9be476fb6c44a79b"}
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.038070 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"02d87702ae1e4c9c342d941fdc791d55ffff6dda8c3f2612560d7e0a0210dbf9"}
Nov 26 22:21:13 crc kubenswrapper[4903]: W1126 22:21:13.150706 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.150787 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:13 crc kubenswrapper[4903]: W1126 22:21:13.156882 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.156965 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:13 crc kubenswrapper[4903]: W1126 22:21:13.222840 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.223036 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.367440 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="1.6s"
Nov 26 22:21:13 crc kubenswrapper[4903]: W1126 22:21:13.561542 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.561734 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.633870 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.636349 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.636424 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.636442 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.636483 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 22:21:13 crc kubenswrapper[4903]: E1126 22:21:13.637280 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.219:6443: connect: connection refused" node="crc"
Nov 26 22:21:13 crc kubenswrapper[4903]: I1126 22:21:13.961535 4903 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.043089 4903 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="20124817ffd11829553b109a1250d7e26b56e6e791dc73550dabbb5a605bcddd" exitCode=0
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.043239 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"20124817ffd11829553b109a1250d7e26b56e6e791dc73550dabbb5a605bcddd"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.043357 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.044854 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.044893 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.044910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.045580 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b" exitCode=0
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.045733 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.045749 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.047480 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.047552 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.047579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.049229 4903 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd" exitCode=0
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.049280 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.049366 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.049668 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051121 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051167 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051222 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051653 4903 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad" exitCode=0
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051728 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.051825 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.054579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.054652 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.054670 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.056493 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.056567 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676"}
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.056629 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49"}
Nov 26 22:21:14 crc kubenswrapper[4903]: W1126 22:21:14.785850 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:14 crc kubenswrapper[4903]: E1126 22:21:14.786010 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:14 crc kubenswrapper[4903]: I1126 22:21:14.962183 4903 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:14 crc kubenswrapper[4903]: E1126 22:21:14.968376 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="3.2s"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.069707 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.069682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.069763 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.069781 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.070419 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.070444 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.070454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.072463 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.072599 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.074000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.074032 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.074045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.075676 4903 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ce908c33727f40666f3f673b4de0cbf50ad40f7e5aa61395f0fa914ffd13a647" exitCode=0
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.075772 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ce908c33727f40666f3f673b4de0cbf50ad40f7e5aa61395f0fa914ffd13a647"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.075846 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.076892 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.076916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.076927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.079244 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.079270 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.079284 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.079296 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.082535 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"0c79be23ae25bb1672628bae5152c8ba607b3afaa8eaefa0060cbf0480a673fb"}
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.082626 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.083648 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.083677 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.083688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.237414 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.242257 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.242304 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.242318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:15 crc kubenswrapper[4903]: I1126 22:21:15.242353 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 22:21:15 crc kubenswrapper[4903]: E1126 22:21:15.242726 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.219:6443: connect: connection refused" node="crc"
Nov 26 22:21:15 crc kubenswrapper[4903]: W1126 22:21:15.313158 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.219:6443: connect: connection refused
Nov 26 22:21:15 crc kubenswrapper[4903]: E1126 22:21:15.313250 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.219:6443: connect: connection refused" logger="UnhandledError"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.094504 4903 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b9013e5af310ab8f4bae338e2888a3df2d39552cdeb3b1c96e151231b8d2b075" exitCode=0
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.094596 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b9013e5af310ab8f4bae338e2888a3df2d39552cdeb3b1c96e151231b8d2b075"}
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.094657 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.095883 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.095925 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.095944 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.100801 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51"}
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.100872 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.100885 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.100991 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.101000 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.101250 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102389 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102439 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102455 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102394 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102599 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.102624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.103309 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.103343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.103356 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.104361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.104404 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:16 crc kubenswrapper[4903]: I1126 22:21:16.104420 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.008919 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.026658 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.109113 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.109147 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.109176 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7d1aed8f15e3f60b3f49e124f670707e393be5e8fa45c820696945f7104aa951"}
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.109999 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"263c55bc78b69f063c4b93473f47f7be5461d1664cb02acb3ed96b4a1b709737"}
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.109256 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.110064 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112214 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112573 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112825 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112845 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.112212 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.113109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:17 crc kubenswrapper[4903]: I1126 22:21:17.113141 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.107600 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116169 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"652411a04332fa105c64b3a57177d1e1d6af826a5ef80d8602677f7f3206aa50"}
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116269 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116312 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116323 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116370 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116267 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9e7245c87b0be5d9d96190143786db91719b2d850b6014f6ce3aef171903f622"}
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116434 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.116469 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"731e84a7516dee2c7e69f3bc0fa899872af1ee147b161aa4b44fa6ac6d8a6155"}
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117597 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117637 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117722 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117742 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117648 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117783 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.117986 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.443637 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.446042 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.446092 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.446105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:18 crc kubenswrapper[4903]: I1126 22:21:18.446136 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.118530 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.119579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.119632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.119653 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.479588 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.479795 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.479844 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.481159 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.481207 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.481218 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:19 crc kubenswrapper[4903]: I1126 22:21:19.713426 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.122081 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.123489 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.123547 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.123565 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.144536 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.144757 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.146154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.146205 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:20 crc kubenswrapper[4903]: I1126 22:21:20.146223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:21 crc kubenswrapper[4903]: I1126 22:21:21.773150 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 26 22:21:21 crc kubenswrapper[4903]: I1126 22:21:21.773600 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:21 crc kubenswrapper[4903]: I1126 22:21:21.775929 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:21 crc kubenswrapper[4903]: I1126 22:21:21.776003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:21 crc kubenswrapper[4903]: I1126 22:21:21.776022 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:22 crc kubenswrapper[4903]: E1126 22:21:22.144810 4903 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 26 22:21:22 crc kubenswrapper[4903]: I1126 22:21:22.539261 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:22 crc kubenswrapper[4903]: I1126 22:21:22.539578 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:22 crc kubenswrapper[4903]: I1126 22:21:22.541568 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:22 crc kubenswrapper[4903]: I1126 22:21:22.541621 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:22 crc kubenswrapper[4903]: I1126 22:21:22.541643 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.455894 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.456974 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.459024 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.459094 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.459113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.464367 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:23 crc kubenswrapper[4903]: I1126 22:21:23.764222 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 26 22:21:24 crc kubenswrapper[4903]: I1126 22:21:24.134671 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:24 crc kubenswrapper[4903]: I1126 22:21:24.135983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:24 crc kubenswrapper[4903]: I1126 22:21:24.136029 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:24 crc kubenswrapper[4903]: I1126 22:21:24.136047 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.136801 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.137973 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.138013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.138031 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:25 crc kubenswrapper[4903]: W1126 22:21:25.914312 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.914479 4903 trace.go:236] Trace[1113425661]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 22:21:15.913) (total time: 10001ms):
Nov 26 22:21:25 crc kubenswrapper[4903]: Trace[1113425661]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (22:21:25.914)
Nov 26 22:21:25 crc kubenswrapper[4903]: Trace[1113425661]: [10.001426766s] [10.001426766s] END
Nov 26 22:21:25 crc kubenswrapper[4903]: E1126 22:21:25.914514 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 26 22:21:25 crc kubenswrapper[4903]: I1126 22:21:25.962278 4903 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 26 22:21:26 crc kubenswrapper[4903]: W1126 22:21:26.329325 4903 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 26 22:21:26 crc kubenswrapper[4903]: I1126 22:21:26.329450 4903 trace.go:236] Trace[1315677566]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 22:21:16.327) (total time: 10001ms):
Nov 26 22:21:26 crc kubenswrapper[4903]: Trace[1315677566]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (22:21:26.329)
Nov 26 22:21:26 crc kubenswrapper[4903]: Trace[1315677566]: [10.001578889s] [10.001578889s] END
Nov 26 22:21:26 crc kubenswrapper[4903]: E1126 22:21:26.329481 4903 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 26 22:21:26 crc kubenswrapper[4903]: I1126 22:21:26.764147 4903 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 26 22:21:26 crc kubenswrapper[4903]: I1126 22:21:26.764258 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 26 22:21:27 crc kubenswrapper[4903]: I1126 22:21:27.317844 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 26 22:21:27 crc kubenswrapper[4903]: I1126 22:21:27.317930 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 26 22:21:27 crc kubenswrapper[4903]: I1126 22:21:27.328114 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 26 22:21:27 crc kubenswrapper[4903]: I1126 22:21:27.328182 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 26 22:21:28 crc kubenswrapper[4903]: I1126 22:21:28.113945 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]log ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]etcd ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/openshift.io-api-request-count-filter ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/openshift.io-startkubeinformers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/priority-and-fairness-config-consumer ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/priority-and-fairness-filter ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-apiextensions-informers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-apiextensions-controllers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/crd-informer-synced ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-system-namespaces-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 26 22:21:28 crc kubenswrapper[4903]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/bootstrap-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-registration-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-discovery-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]autoregister-completion ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-openapi-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 26 22:21:28 crc kubenswrapper[4903]: livez check failed
Nov 26 22:21:28 crc kubenswrapper[4903]: I1126 22:21:28.114046 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.422991 4903 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.751664 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.752012 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.753393 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.753444 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.753459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:29 crc kubenswrapper[4903]: I1126 22:21:29.767847 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 26 22:21:30 crc kubenswrapper[4903]: I1126 22:21:30.150867 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 26 22:21:30 crc kubenswrapper[4903]: I1126 22:21:30.152004 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:30 crc kubenswrapper[4903]: I1126 22:21:30.152039 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:30 crc kubenswrapper[4903]: I1126 22:21:30.152051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.030312 4903 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 26 22:21:32 crc kubenswrapper[4903]: E1126 22:21:32.293189 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.295161 4903 trace.go:236] Trace[1895566431]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 22:21:19.520) (total time: 12774ms):
Nov 26 22:21:32 crc kubenswrapper[4903]: Trace[1895566431]: ---"Objects listed" error: 12774ms (22:21:32.295)
Nov 26 22:21:32 crc kubenswrapper[4903]: Trace[1895566431]: [12.774347496s] [12.774347496s] END
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.295190 4903 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.298393 4903 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 26 22:21:32 crc kubenswrapper[4903]: E1126 22:21:32.299599 4903 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.300966 4903 trace.go:236] Trace[802858100]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 22:21:19.737) (total time: 12563ms):
Nov 26 22:21:32 crc kubenswrapper[4903]: Trace[802858100]: ---"Objects listed" error: 12563ms (22:21:32.300)
Nov 26 22:21:32 crc kubenswrapper[4903]: Trace[802858100]: [12.563290427s] [12.563290427s] END
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.300989 4903 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.342512 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46206->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.342619 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:46206->192.168.126.11:17697: read: connection reset by peer"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.956618 4903 apiserver.go:52] "Watching apiserver"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.959861 4903 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.960351 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.960838 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.961608 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:32 crc kubenswrapper[4903]: E1126 22:21:32.961733 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.962042 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:21:32 crc kubenswrapper[4903]: E1126 22:21:32.962158 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.963157 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.963231 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.963395 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:21:32 crc kubenswrapper[4903]: E1126 22:21:32.963487 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.963748 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.965714 4903 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.967394 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.967817 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.967821 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.967913 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.968046 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.968482 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.968755 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.970681 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 22:21:32 crc kubenswrapper[4903]: I1126 22:21:32.993069 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003197 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003242 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003270 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003296 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003316 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003336 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003353 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: 
I1126 22:21:33.003375 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003401 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003420 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003441 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003457 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003517 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003533 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003548 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003561 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003575 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " 
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003580 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003631 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003861 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003897 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003901 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004006 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004016 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004003 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004088 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004036 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004054 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004164 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004174 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004191 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004224 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004244 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004221 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004226 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004264 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004314 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004337 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004348 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004357 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004385 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004385 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004393 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004411 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004440 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004489 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004509 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004528 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004549 4903 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004568 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004590 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004612 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004634 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004747 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004771 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004788 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004804 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004821 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 22:21:33 crc 
kubenswrapper[4903]: I1126 22:21:33.004839 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004857 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004875 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004894 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004911 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004928 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004946 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004964 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004987 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005005 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005021 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005039 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005065 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005083 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005100 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005119 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005138 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005154 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005173 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005190 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: 
\"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005207 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005224 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005241 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005275 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005293 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005321 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005339 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004528 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005373 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004638 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004725 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004866 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004930 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.004958 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005475 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005032 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005495 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005162 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005312 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005524 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005318 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005641 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005732 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005778 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005819 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005919 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005955 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.003637 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.005356 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006199 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006215 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006365 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006815 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006843 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006864 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006884 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006907 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006925 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006948 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006967 
4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.006988 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007006 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007026 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007043 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007059 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007106 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.007193 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:21:33.507168182 +0000 UTC m=+22.197403082 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007351 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007113 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007392 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007434 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
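The E-level nestedpendingoperations entry above shows how kubelet gates retries after a failed volume operation: the error is recorded against the {volumeName, podName} key shown in the message, and no retry of that one operation is permitted until the logged deadline, with the wait (durationBeforeRetry) starting at 500ms and growing on each consecutive failure. A minimal Go sketch of that backoff bookkeeping follows; the 500ms initial delay and the per-operation keying come from the log line itself, while the doubling factor and the ~2m cap are assumptions, not values confirmed here.

package main

import (
	"fmt"
	"time"
)

// Assumed backoff parameters: only the 500ms initial delay is confirmed
// by "durationBeforeRetry 500ms" in the log entry above; the doubling
// factor and the cap are illustrative.
const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
)

// expBackoff holds the retry gate for a single volume operation,
// keyed in the real reconciler by {volumeName, podName}.
type expBackoff struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
}

// update records a failure and doubles the wait, capped at the maximum.
func (b *expBackoff) update(now time.Time) {
	if b.durationBeforeRetry == 0 {
		b.durationBeforeRetry = initialDurationBeforeRetry
	} else {
		b.durationBeforeRetry *= 2
		if b.durationBeforeRetry > maxDurationBeforeRetry {
			b.durationBeforeRetry = maxDurationBeforeRetry
		}
	}
	b.lastErrorTime = now
}

// safeToRetry reports whether enough time has passed since the last failure.
func (b *expBackoff) safeToRetry(now time.Time) bool {
	return now.After(b.lastErrorTime.Add(b.durationBeforeRetry))
}

func main() {
	var b expBackoff
	now := time.Now()
	for i := 1; i <= 5; i++ {
		b.update(now)
		fmt.Printf("failure %d: no retries permitted for %v\n", i, b.durationBeforeRetry)
	}
	fmt.Println("safe to retry immediately?", b.safeToRetry(now)) // false right after a failure
}

Against five consecutive failures this prints waits of 500ms, 1s, 2s, 4s, 8s. Because the gate is per volume+pod rather than global, only the hostpath-provisioner PVC above is parked for 500ms while the TearDown entries that follow proceed normally.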
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007575 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007594 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007612 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007639 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007663 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007678 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007707 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007733 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007748 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007763 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: 
\"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007779 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007803 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007818 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007834 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007852 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007912 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007932 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008103 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008241 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008330 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008352 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008360 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008376 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008372 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008395 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008414 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008433 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008449 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008478 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008493 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008510 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008526 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008542 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008578 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: 
\"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008594 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008611 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008626 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008644 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008661 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008704 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008721 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008748 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008767 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009364 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009718 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009657 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009744 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010925 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010962 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010988 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011007 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011029 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011049 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011095 4903 
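The status_manager failure above is a knock-on symptom rather than an independent fault: the status patch for iptables-alerter-4ln5h has to pass the pod.network-node-identity.openshift.io admission webhook, and the webhook's backend at 127.0.0.1:9743 is not listening yet this early in node startup, so the API server's Post to it fails with connection refused and the whole patch is rejected; kubelet retries the status update later. The connectivity half of the symptom can be checked from the node with a plain TCP dial. A minimal sketch, with the address taken from the log entry and everything else illustrative:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Webhook backend address from the failed Post in the log entry above.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// While the backend is down this reports
		// "connect: connection refused", matching the log.
		fmt.Println("webhook endpoint unreachable:", err)
		return
	}
	defer conn.Close()
	fmt.Println("webhook endpoint is accepting connections")
}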
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011114 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011131 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011148 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011164 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011180 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011195 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011211 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011226 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011243 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 22:21:33 
crc kubenswrapper[4903]: I1126 22:21:33.011258 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011276 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011292 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011309 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011324 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011340 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011358 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011375 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011394 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011412 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 
26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011428 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011445 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011462 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011478 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011495 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011511 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011529 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011569 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011592 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011618 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 
22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011640 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011663 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011681 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011742 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011765 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011785 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011810 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011835 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011860 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011878 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 22:21:33 
crc kubenswrapper[4903]: I1126 22:21:33.011901 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011929 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011952 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011977 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012004 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012034 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012058 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012081 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012099 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012117 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012133 
4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012153 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012171 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012189 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012211 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012230 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012248 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012266 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012288 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012319 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 22:21:33 crc 
kubenswrapper[4903]: I1126 22:21:33.012337 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012359 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012416 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012447 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012488 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012513 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012559 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012627 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012651 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012681 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012739 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012770 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012798 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012820 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012836 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012853 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012873 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012891 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012911 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013003 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013020 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013034 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013046 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013056 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013068 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013077 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013087 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013097 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013107 4903 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: 
\"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013116 4903 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013126 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013136 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013146 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013155 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013164 4903 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013175 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013185 4903 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013195 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013204 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013214 4903 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013224 4903 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013235 4903 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013248 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013257 4903 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013268 4903 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013278 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013287 4903 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013296 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013305 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013316 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013325 4903 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013334 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013343 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013354 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013364 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013374 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013384 4903 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013394 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013404 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013415 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013427 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013437 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013447 4903 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013457 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013466 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013475 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013486 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013497 4903 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013509 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013519 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013530 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020514 4903 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020822 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008719 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008726 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021473 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008738 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.007311 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008799 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008808 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.008851 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009096 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009094 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009183 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.009276 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010013 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010024 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.021539 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.021635 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:33.521615769 +0000 UTC m=+22.211850679 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021830 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010431 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010406 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010524 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010709 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010817 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010813 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011649 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011734 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.011898 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012106 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012531 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012985 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.012998 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013179 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013393 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013747 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013854 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). 
InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.013852 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014077 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014094 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014159 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014395 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014424 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014428 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014670 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.014716 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.015327 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.015163 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.015840 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.016160 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.018259 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.018298 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.018592 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.018735 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.018894 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019008 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019389 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019613 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019663 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019766 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.022349 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.019679 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020052 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020189 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020206 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020566 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020780 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020918 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021001 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021216 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021309 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.021302 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.010093 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.020401 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.025049 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.025282 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.025291 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.026330 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.027988 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.028150 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.028361 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.028439 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.028455 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.028533 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:33.528517129 +0000 UTC m=+22.218752039 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.028845 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.028877 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.028974 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.028993 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.029004 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.029004 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.029042 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:33.529032012 +0000 UTC m=+22.219266922 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.029227 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.029708 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.030128 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.030968 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.031188 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.031324 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.031503 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.031734 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.031954 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.031994 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.032050 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.032116 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:33.532081047 +0000 UTC m=+22.222315957 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.032252 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.032529 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.035226 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.035410 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.035585 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.035673 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.036362 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.037089 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.037564 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.037638 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.037783 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.037744 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.038008 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.038040 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.038341 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.038348 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.041039 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.041482 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.041831 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.042331 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.044148 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.057035 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.057930 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.058839 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.058871 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.059295 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.059540 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.067189 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.068291 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.070892 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.071279 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.071747 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.071791 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072290 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072343 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072355 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072404 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072544 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072687 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.072848 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.073127 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.073215 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.073476 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.073820 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.078893 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.087015 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.088335 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.088416 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.088469 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.094001 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.096545 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.096655 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.096957 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.097433 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.097626 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.097767 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.097897 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.098892 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.099702 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.100479 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.106901 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.116560 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.116919 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117047 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117227 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117246 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117579 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117664 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.117781 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get 
\"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118849 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118889 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118909 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118924 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118936 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118952 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118938 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.118986 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119011 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119027 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119041 4903 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119058 4903 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119071 4903 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119084 4903 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119093 4903 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119106 4903 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119122 4903 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119194 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119214 4903 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119243 4903 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" 
DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119257 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119269 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119290 4903 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119302 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119333 4903 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119346 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119364 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119379 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119396 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119409 4903 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119424 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119434 4903 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119444 4903 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc 
kubenswrapper[4903]: I1126 22:21:33.119455 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119474 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119502 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119516 4903 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119532 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119541 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119550 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119559 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119571 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119581 4903 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119591 4903 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119606 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119617 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119629 4903 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119639 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119682 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119709 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119719 4903 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119729 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119741 4903 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119750 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119759 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119768 4903 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119782 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119793 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119803 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119819 4903 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119831 4903 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119839 4903 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119849 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119860 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119870 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119884 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119893 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119904 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119915 4903 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119924 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119933 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119961 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119970 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119979 4903 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.119992 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120012 4903 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120021 4903 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120029 4903 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120041 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120050 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120061 4903 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120072 4903 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120084 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120093 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120103 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120117 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: 
\"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120126 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120136 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120145 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120158 4903 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120178 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120188 4903 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120198 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120209 4903 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120218 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120228 4903 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120240 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120252 4903 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120265 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120277 4903 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120295 4903 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120304 4903 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120314 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120326 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120349 4903 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120358 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120367 4903 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120376 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120388 4903 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120397 4903 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120406 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120418 4903 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120428 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120437 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120447 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120458 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120480 4903 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120515 4903 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120525 4903 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120538 4903 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120546 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120555 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120565 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120577 4903 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120586 4903 reconciler_common.go:293] "Volume 
detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120595 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120606 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120615 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120625 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120635 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120647 4903 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120656 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120665 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120673 4903 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120684 4903 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120713 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120723 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120736 4903 
reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.120755 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.122576 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.123091 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.130334 4903 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.130451 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.130896 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.135399 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.146819 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.147112 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.158375 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.160426 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.162212 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51" exitCode=255 Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.162256 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51"} Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.169359 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.177717 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.193052 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.201122 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.221368 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.221390 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.221399 4903 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.230784 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.277854 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.284755 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.291875 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 22:21:33 crc kubenswrapper[4903]: W1126 22:21:33.300824 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-d6817825b75aad094fb7fdeb48fdc213c15219e2f70ad3f2d6e35b3452609382 WatchSource:0}: Error finding container d6817825b75aad094fb7fdeb48fdc213c15219e2f70ad3f2d6e35b3452609382: Status 404 returned error can't find the container with id d6817825b75aad094fb7fdeb48fdc213c15219e2f70ad3f2d6e35b3452609382 Nov 26 22:21:33 crc kubenswrapper[4903]: W1126 22:21:33.302096 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-2436d47679eb052fda07f3bfce252b4d1d808a3c49c348de828b2223dc3a0e1f WatchSource:0}: Error finding container 2436d47679eb052fda07f3bfce252b4d1d808a3c49c348de828b2223dc3a0e1f: Status 404 returned error can't find the container with id 2436d47679eb052fda07f3bfce252b4d1d808a3c49c348de828b2223dc3a0e1f Nov 26 22:21:33 crc kubenswrapper[4903]: W1126 22:21:33.304893 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-1137bd9eb4cb6cb44967352676af94d3ecc5cef97484951a3dd34d3f4054b38a WatchSource:0}: Error finding container 1137bd9eb4cb6cb44967352676af94d3ecc5cef97484951a3dd34d3f4054b38a: Status 404 returned error can't find the container with id 1137bd9eb4cb6cb44967352676af94d3ecc5cef97484951a3dd34d3f4054b38a Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.523475 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.523540 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.523652 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.523676 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:21:34.52364798 +0000 UTC m=+23.213882890 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.523722 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:34.523714692 +0000 UTC m=+23.213949602 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.624817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.624855 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.624878 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.624979 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625076 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:34.625057285 +0000 UTC m=+23.315292195 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.624993 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625092 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625131 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625148 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625107 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625211 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625199 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:34.625182589 +0000 UTC m=+23.315417489 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: E1126 22:21:33.625268 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:34.625255211 +0000 UTC m=+23.315490121 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.768914 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.771949 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.777027 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.783231 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.794014 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.803808 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.814131 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.824937 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.835152 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.846365 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.858745 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.870016 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.887161 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.900114 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.915208 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.930742 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.947814 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:33 crc kubenswrapper[4903]: I1126 22:21:33.961549 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.028336 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.028590 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.033482 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.034220 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.035262 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.036089 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.036949 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.038139 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.038994 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.039813 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.040624 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.041396 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.044335 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.045384 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.046508 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.047203 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.048436 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.049151 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.049882 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.050877 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.051589 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.052323 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.053479 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.054253 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.055341 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.056290 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.056863 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.058212 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.059634 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.060333 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.061113 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.062189 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.062815 4903 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.062950 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.065605 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.066301 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.066896 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.069057 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.070337 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.070999 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.072314 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.073233 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.074339 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.075150 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.076979 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.077860 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.078500 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.079271 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.079954 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.081083 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.081745 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.082405 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.083044 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.083748 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.084554 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.086657 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.094796 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-2z7vf"] Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.095399 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-wjwph"] Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.095587 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.096360 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.098358 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.098373 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.098662 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.099050 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bbznt"] Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.099977 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.100093 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.100197 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.100121 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.100047 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.100333 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-bxnsh"] Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.101061 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.102242 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.102405 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.102541 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-dlvd4"] Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.102845 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.106366 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.106702 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.106715 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.107011 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.107411 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.107746 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.108297 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.109265 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.109718 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.109772 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.110211 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.110380 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.110267 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.118936 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.134469 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.147799 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.160260 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.167206 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.167359 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.167461 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2436d47679eb052fda07f3bfce252b4d1d808a3c49c348de828b2223dc3a0e1f"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.169326 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.169381 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"d6817825b75aad094fb7fdeb48fdc213c15219e2f70ad3f2d6e35b3452609382"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.170232 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1137bd9eb4cb6cb44967352676af94d3ecc5cef97484951a3dd34d3f4054b38a"} Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.172073 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.177406 4903 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.177485 4903 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.177664 4903 scope.go:117] "RemoveContainer" containerID="c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.183385 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.196164 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.211240 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.228502 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.230846 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnc94\" (UniqueName: \"kubernetes.io/projected/701c1e82-a66e-40d9-884e-2d59449edccc-kube-api-access-fnc94\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: 
I1126 22:21:34.230903 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.230926 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.230944 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-kubelet\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.230995 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231017 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-multus-daemon-config\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231099 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-os-release\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231224 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231254 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231299 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-binary-copy\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: 
\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231347 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231402 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231490 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231550 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-multus\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231720 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/232b7aad-b4bd-495a-a411-0cfd48fa372c-mcd-auth-proxy-config\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231763 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt8rx\" (UniqueName: \"kubernetes.io/projected/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-kube-api-access-zt8rx\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231864 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-system-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.231944 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-cnibin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232000 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" 
(UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-netns\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232032 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-hostroot\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232094 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/701c1e82-a66e-40d9-884e-2d59449edccc-hosts-file\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232118 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232295 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232360 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232394 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232436 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232498 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrnqs\" (UniqueName: \"kubernetes.io/projected/232b7aad-b4bd-495a-a411-0cfd48fa372c-kube-api-access-rrnqs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232521 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232559 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232579 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-os-release\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232603 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-socket-dir-parent\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232626 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-multus-certs\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232674 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/232b7aad-b4bd-495a-a411-0cfd48fa372c-rootfs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232715 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232751 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72nv6\" (UniqueName: \"kubernetes.io/projected/229974d7-7b78-434b-a346-8b9004e69bf2-kube-api-access-72nv6\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232772 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 
22:21:34.232791 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232812 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-conf-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232858 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232880 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-system-cni-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232902 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232921 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232943 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mpcc\" (UniqueName: \"kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232963 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-cni-binary-copy\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.232983 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-k8s-cni-cncf-io\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") 
" pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.233005 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-bin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.233032 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-etc-kubernetes\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.233073 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/232b7aad-b4bd-495a-a411-0cfd48fa372c-proxy-tls\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.233096 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.233119 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cnibin\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.243643 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.263815 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.285072 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.300258 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.315597 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.330739 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333751 4903 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333805 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/232b7aad-b4bd-495a-a411-0cfd48fa372c-rootfs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333830 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333852 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-os-release\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333876 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-socket-dir-parent\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333899 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-multus-certs\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333924 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333931 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72nv6\" (UniqueName: \"kubernetes.io/projected/229974d7-7b78-434b-a346-8b9004e69bf2-kube-api-access-72nv6\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.333995 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334021 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config\") pod 
\"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334052 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-conf-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334075 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334099 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-system-cni-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334120 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334095 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-multus-certs\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334161 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334139 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334064 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/232b7aad-b4bd-495a-a411-0cfd48fa372c-rootfs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334170 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-socket-dir-parent\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " 
pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334205 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-system-cni-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334204 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mpcc\" (UniqueName: \"kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334282 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334358 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/232b7aad-b4bd-495a-a411-0cfd48fa372c-proxy-tls\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334421 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334441 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-cni-binary-copy\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-k8s-cni-cncf-io\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334474 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-bin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334490 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-etc-kubernetes\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc 
kubenswrapper[4903]: I1126 22:21:34.334507 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cnibin\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334516 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334542 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-os-release\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334571 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-bin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334607 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-k8s-cni-cncf-io\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334630 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334631 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-etc-kubernetes\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334565 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-conf-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334820 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cnibin\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.335156 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.335250 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.335794 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.334540 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnc94\" (UniqueName: \"kubernetes.io/projected/701c1e82-a66e-40d9-884e-2d59449edccc-kube-api-access-fnc94\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336053 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336076 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336099 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-kubelet\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336129 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336140 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336194 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-multus-daemon-config\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336228 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-kubelet\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336173 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336259 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-os-release\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336304 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336335 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336370 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336399 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336421 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-multus\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 
22:21:34.336447 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-binary-copy\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336446 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-cni-binary-copy\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336469 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336588 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/232b7aad-b4bd-495a-a411-0cfd48fa372c-mcd-auth-proxy-config\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336612 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336634 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt8rx\" (UniqueName: \"kubernetes.io/projected/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-kube-api-access-zt8rx\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-system-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336738 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336765 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-cnibin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336799 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-netns\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336837 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/701c1e82-a66e-40d9-884e-2d59449edccc-hosts-file\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336904 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-hostroot\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336940 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336971 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337004 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337042 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337064 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337100 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrnqs\" (UniqueName: \"kubernetes.io/projected/232b7aad-b4bd-495a-a411-0cfd48fa372c-kube-api-access-rrnqs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337134 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-var-lib-cni-multus\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337135 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/229974d7-7b78-434b-a346-8b9004e69bf2-multus-daemon-config\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337136 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337194 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337204 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337727 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337783 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-hostroot\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337813 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-binary-copy\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338031 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-multus-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " 
pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338036 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-system-cni-dir\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.336684 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-os-release\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338059 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338074 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-cnibin\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338093 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/229974d7-7b78-434b-a346-8b9004e69bf2-host-run-netns\") pod \"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338113 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/701c1e82-a66e-40d9-884e-2d59449edccc-hosts-file\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.337109 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338528 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/232b7aad-b4bd-495a-a411-0cfd48fa372c-mcd-auth-proxy-config\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.338836 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.343978 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.349126 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.358577 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/232b7aad-b4bd-495a-a411-0cfd48fa372c-proxy-tls\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.365593 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnc94\" (UniqueName: \"kubernetes.io/projected/701c1e82-a66e-40d9-884e-2d59449edccc-kube-api-access-fnc94\") pod \"node-resolver-dlvd4\" (UID: \"701c1e82-a66e-40d9-884e-2d59449edccc\") " pod="openshift-dns/node-resolver-dlvd4"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.365845 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mpcc\" (UniqueName: \"kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc\") pod \"ovnkube-node-bbznt\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " pod="openshift-ovn-kubernetes/ovnkube-node-bbznt"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.366076 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt8rx\" (UniqueName: \"kubernetes.io/projected/4943d6ca-5152-4ac1-a9d3-850d5a5063b7-kube-api-access-zt8rx\") pod \"multus-additional-cni-plugins-2z7vf\" (UID: \"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\") " pod="openshift-multus/multus-additional-cni-plugins-2z7vf"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.366488 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrnqs\" (UniqueName: \"kubernetes.io/projected/232b7aad-b4bd-495a-a411-0cfd48fa372c-kube-api-access-rrnqs\") pod \"machine-config-daemon-wjwph\" (UID: \"232b7aad-b4bd-495a-a411-0cfd48fa372c\") " pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.366670 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72nv6\" (UniqueName: \"kubernetes.io/projected/229974d7-7b78-434b-a346-8b9004e69bf2-kube-api-access-72nv6\") pod 
\"multus-bxnsh\" (UID: \"229974d7-7b78-434b-a346-8b9004e69bf2\") " pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.381189 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mou
ntPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":
\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.403781 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.410871 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.420990 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.438924 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:34 crc kubenswrapper[4903]: W1126 22:21:34.439847 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod232b7aad_b4bd_495a_a411_0cfd48fa372c.slice/crio-007083bdd13dde6450d8038aaa159bea9c5904b128dade47b12e5d686de6e740 WatchSource:0}: Error finding container 007083bdd13dde6450d8038aaa159bea9c5904b128dade47b12e5d686de6e740: Status 404 returned error can't find the container with id 007083bdd13dde6450d8038aaa159bea9c5904b128dade47b12e5d686de6e740 Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.443597 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.452940 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bxnsh" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.464330 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-dlvd4" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.484181 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.1
26.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.503988 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.529359 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:34Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.541032 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.541188 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:21:36.54115959 +0000 UTC m=+25.231394500 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.541327 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.541546 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.541645 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:36.541619743 +0000 UTC m=+25.231854823 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: W1126 22:21:34.582840 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod701c1e82_a66e_40d9_884e_2d59449edccc.slice/crio-15b29642d575dac80fbe2e483c29495a62da938be7755cd2eaf12a1c0b3cfb77 WatchSource:0}: Error finding container 15b29642d575dac80fbe2e483c29495a62da938be7755cd2eaf12a1c0b3cfb77: Status 404 returned error can't find the container with id 15b29642d575dac80fbe2e483c29495a62da938be7755cd2eaf12a1c0b3cfb77 Nov 26 22:21:34 crc kubenswrapper[4903]: W1126 22:21:34.584456 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod229974d7_7b78_434b_a346_8b9004e69bf2.slice/crio-5f3db249c8d644a10cec0166d1debcb2d610d72a836f99805197587d67fd0e44 WatchSource:0}: Error finding container 5f3db249c8d644a10cec0166d1debcb2d610d72a836f99805197587d67fd0e44: Status 404 returned error can't find the container with id 5f3db249c8d644a10cec0166d1debcb2d610d72a836f99805197587d67fd0e44 Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.642114 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.642193 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:34 crc kubenswrapper[4903]: I1126 22:21:34.642225 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.642642 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.642739 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:36.64271877 +0000 UTC m=+25.332953680 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643285 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643331 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643347 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643375 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:36.643367958 +0000 UTC m=+25.333602868 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643455 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643485 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643495 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:34 crc kubenswrapper[4903]: E1126 22:21:34.643518 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:36.643511523 +0000 UTC m=+25.333746433 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.027859 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.027878 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:35 crc kubenswrapper[4903]: E1126 22:21:35.027998 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:35 crc kubenswrapper[4903]: E1126 22:21:35.028136 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.174764 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6" exitCode=0 Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.174860 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.174905 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"acdfc2b82174b43757c8586dd647dce4fd2a6a49a7cd07e8050250cb98884d88"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.182783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.182854 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.182871 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"007083bdd13dde6450d8038aaa159bea9c5904b128dade47b12e5d686de6e740"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.184640 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerStarted","Data":"bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.184783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerStarted","Data":"5d14af40405cbed71307693b57e8ad2ccb4b9f4fc5a86629b7806809daaaec51"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.193679 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerStarted","Data":"8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.193753 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerStarted","Data":"5f3db249c8d644a10cec0166d1debcb2d610d72a836f99805197587d67fd0e44"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.197473 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dlvd4" event={"ID":"701c1e82-a66e-40d9-884e-2d59449edccc","Type":"ContainerStarted","Data":"6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.197556 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-dlvd4" event={"ID":"701c1e82-a66e-40d9-884e-2d59449edccc","Type":"ContainerStarted","Data":"15b29642d575dac80fbe2e483c29495a62da938be7755cd2eaf12a1c0b3cfb77"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.201579 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.203229 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.205798 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238"} Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.216985 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.231366 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.254854 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.268958 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.284496 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.310227 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.332791 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.349928 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.376639 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.390723 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.408195 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.425053 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.440853 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.461491 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.504296 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.520549 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.535822 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.556070 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.573639 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.587945 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.600823 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.612105 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.627311 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-c
ni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.643038 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:35 crc kubenswrapper[4903]: I1126 22:21:35.660365 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:35Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.028235 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.028429 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.213988 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.214072 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.214091 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.214106 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.214120 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.214138 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.215916 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563" exitCode=0
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.215992 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.218938 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0"}
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.219302 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.239418 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.263581 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.278843 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.293999 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.310487 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.325080 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.341421 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.358385 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.372621 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.394794 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.407232 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.420132 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.436760 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z
is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.449188 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.459784 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.470352 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.483464 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.495082 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.508428 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.524278 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.537661 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.552569 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.560969 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.561135 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.561160 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:21:40.561127148 +0000 UTC m=+29.251362058 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.561314 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.561405 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:40.561380715 +0000 UTC m=+29.251615665 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.563683 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.578524 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/et
c/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.596289 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.608469 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:36Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.662091 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.662324 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 
22:21:36 crc kubenswrapper[4903]: I1126 22:21:36.662361 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662478 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662550 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:40.662534234 +0000 UTC m=+29.352769144 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662591 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662638 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662666 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662719 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662781 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662804 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662808 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:40.662773921 +0000 UTC m=+29.353008871 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:36 crc kubenswrapper[4903]: E1126 22:21:36.662913 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:40.662868013 +0000 UTC m=+29.353103153 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.027598 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.027649 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:37 crc kubenswrapper[4903]: E1126 22:21:37.027754 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:37 crc kubenswrapper[4903]: E1126 22:21:37.027878 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.224817 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerStarted","Data":"3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35"} Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.240498 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.262612 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.283969 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.305185 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.325865 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.337822 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.349068 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.360900 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.375293 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.388245 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.399544 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.411406 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.427720 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.545747 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-knwk2"] Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.546325 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.549518 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.549995 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.550095 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.550138 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.572948 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.585444 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.594204 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.605129 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator
@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.616519 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.631961 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.642922 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.655751 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\
\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-rele
ase\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.670573 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/77f835e9-1a25-43f2-9c32-5d5311495723-host\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.670666 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/77f835e9-1a25-43f2-9c32-5d5311495723-serviceca\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.670852 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72n8x\" (UniqueName: \"kubernetes.io/projected/77f835e9-1a25-43f2-9c32-5d5311495723-kube-api-access-72n8x\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.672145 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.684596 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\
" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.698635 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.714747 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.728037 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.751090 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:37Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.771553 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/77f835e9-1a25-43f2-9c32-5d5311495723-host\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.771909 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/77f835e9-1a25-43f2-9c32-5d5311495723-serviceca\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.772523 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72n8x\" (UniqueName: \"kubernetes.io/projected/77f835e9-1a25-43f2-9c32-5d5311495723-kube-api-access-72n8x\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.771770 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/77f835e9-1a25-43f2-9c32-5d5311495723-host\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.773164 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/77f835e9-1a25-43f2-9c32-5d5311495723-serviceca\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.790972 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72n8x\" (UniqueName: \"kubernetes.io/projected/77f835e9-1a25-43f2-9c32-5d5311495723-kube-api-access-72n8x\") pod \"node-ca-knwk2\" (UID: \"77f835e9-1a25-43f2-9c32-5d5311495723\") " pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: I1126 22:21:37.860501 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-knwk2" Nov 26 22:21:37 crc kubenswrapper[4903]: W1126 22:21:37.883143 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77f835e9_1a25_43f2_9c32_5d5311495723.slice/crio-9280fefddb91314321131880b9df1dea2dac148891c596125dccaef7d1635f14 WatchSource:0}: Error finding container 9280fefddb91314321131880b9df1dea2dac148891c596125dccaef7d1635f14: Status 404 returned error can't find the container with id 9280fefddb91314321131880b9df1dea2dac148891c596125dccaef7d1635f14 Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.027941 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.028499 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.230083 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35" exitCode=0 Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.230182 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35"} Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.237177 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f"} Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.241341 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-knwk2" event={"ID":"77f835e9-1a25-43f2-9c32-5d5311495723","Type":"ContainerStarted","Data":"7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4"} Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.241398 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-knwk2" event={"ID":"77f835e9-1a25-43f2-9c32-5d5311495723","Type":"ContainerStarted","Data":"9280fefddb91314321131880b9df1dea2dac148891c596125dccaef7d1635f14"} Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.250260 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.272105 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.285913 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.301139 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.314154 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-
o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.337506 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.348957 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.364279 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.376399 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.387380 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.405603 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"na
me\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.418500 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.435057 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.443425 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.458646 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.470059 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.482802 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.495343 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.508628 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.521513 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.532818 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.546204 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.561285 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.578499 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.595201 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.617772 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.641121 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.653286 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.699822 4903 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.701822 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.701856 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.701864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.701999 4903 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 
22:21:38.709908 4903 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.710146 4903 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.711184 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.711212 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.711219 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.711234 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.711245 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.744108 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.747748 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.747792 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.747805 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.747824 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.747837 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.764837 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.769877 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.769919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.769937 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.769961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.769978 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.789611 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.794355 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.794388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.794405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.794427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.794445 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.810156 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.814653 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.814721 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.814737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.814755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.814768 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.828323 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:38Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:38 crc kubenswrapper[4903]: E1126 22:21:38.828641 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.830082 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.830112 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.830123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.830137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.830148 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.932810 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.932855 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.932867 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.932885 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:38 crc kubenswrapper[4903]: I1126 22:21:38.932900 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:38Z","lastTransitionTime":"2025-11-26T22:21:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.027409 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.027424 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:39 crc kubenswrapper[4903]: E1126 22:21:39.027550 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:39 crc kubenswrapper[4903]: E1126 22:21:39.027650 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.038994 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.039069 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.039087 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.039162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.039190 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.141891 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.142089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.142171 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.142240 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.142322 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.245179 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.245242 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.245265 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.245293 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.245313 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.250417 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f" exitCode=0 Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.250450 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.272662 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.289909 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.303871 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.341469 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.351315 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.351347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.351356 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.351371 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.351380 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.364229 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.387907 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.409841 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.420578 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.433065 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.443215 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.453753 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.453783 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.453792 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.453806 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.453814 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.454993 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.464446 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.473684 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.484541 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:39Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.557378 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.557440 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.557459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.557485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.557504 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.660517 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.660555 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.660566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.660582 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.660593 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.764416 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.764469 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.764490 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.764520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.764544 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.868440 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.868521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.868543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.868574 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.868596 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.972113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.972172 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.972188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.972212 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:39 crc kubenswrapper[4903]: I1126 22:21:39.972231 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:39Z","lastTransitionTime":"2025-11-26T22:21:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.028276 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.028485 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.076341 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.076396 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.076412 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.076442 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.076466 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.179247 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.179318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.179340 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.179369 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.179390 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.257888 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94" exitCode=0 Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.257957 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.282447 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.282507 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.282528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.282558 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.282580 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.285098 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.305742 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.385141 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.385211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.385227 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.385252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.385272 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.448254 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.473105 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z 
is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.488230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.488287 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.488302 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.488322 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.488339 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.490553 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.505297 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.517108 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.528480 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.543738 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.556493 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.572303 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.584107 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.593038 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.593101 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.593121 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.593148 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.593165 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.602913 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.603118 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.603142 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:21:48.60311011 +0000 UTC m=+37.293345060 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.603278 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.603362 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:48.603338786 +0000 UTC m=+37.293573736 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.608346 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.628209 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.697296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.697359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.697380 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.697410 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.697433 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.704570 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.704805 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.704758 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.704862 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.704877 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.704930 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:48.704912856 +0000 UTC m=+37.395147776 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705175 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705218 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705239 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.705282 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705313 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:48.705287236 +0000 UTC m=+37.395522186 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705377 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: E1126 22:21:40.705414 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:48.70540435 +0000 UTC m=+37.395639330 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.799955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.799993 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.800002 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.800016 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.800024 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.909402 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.909465 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.909482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.909506 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:40 crc kubenswrapper[4903]: I1126 22:21:40.909525 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:40Z","lastTransitionTime":"2025-11-26T22:21:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.012055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.012116 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.012134 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.012159 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.012179 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.027640 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.027766 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:41 crc kubenswrapper[4903]: E1126 22:21:41.027849 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:41 crc kubenswrapper[4903]: E1126 22:21:41.027953 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.115592 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.115630 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.115648 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.115666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.115677 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.217613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.217683 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.217735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.217761 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.217779 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.269384 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.269865 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.269911 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.275242 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerStarted","Data":"6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.296115 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\"
,\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.319857 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.320838 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.320905 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.320922 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.320948 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.320967 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.329543 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.331250 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.355659 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\"
:\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{
\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 
22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.372971 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.393344 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.414623 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.423602 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.423667 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.423685 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.423740 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.423758 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.436494 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.456127 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.474327 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.490804 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.507956 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.526755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.526816 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.526833 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.526857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.526874 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.533302 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.558511 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.579260 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.601457 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.624120 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.629999 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.630060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.630077 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.630104 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.630120 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.647278 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.669966 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.691377 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.723883 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.733484 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.733538 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.733554 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.733577 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.733594 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.741445 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.765987 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.783006 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.800005 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.821004 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.836679 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.836774 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.836793 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.836816 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.836833 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.841963 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.889609 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.906539 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:41Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.940019 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.940073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.940093 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.940119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:41 crc kubenswrapper[4903]: I1126 22:21:41.940135 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:41Z","lastTransitionTime":"2025-11-26T22:21:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.027831 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:42 crc kubenswrapper[4903]: E1126 22:21:42.028053 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.042641 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.042720 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.042742 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.042765 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.042783 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.050807 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.072005 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.105965 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb46
97e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.122446 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145289 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145389 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145422 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145426 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.145482 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.172441 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.196398 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.217074 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.237305 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.249011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.249070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.249088 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.249113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.249130 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.252184 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.270179 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.285392 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7" exitCode=0 Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.285485 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.285638 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.286462 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.309850 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.325130 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.342550 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.352378 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.352408 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.352418 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.352434 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.352445 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.358193 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.376025 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.392002 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.408230 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.436018 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.450019 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.455553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.455603 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.455619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.455642 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.455656 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.465676 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.480416 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.498003 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.511011 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.542021 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.558193 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.558249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.558262 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.558285 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.558299 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.565665 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11
-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/
\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswi
tch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.577525 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.661384 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.661447 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.661460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.661486 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.661502 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.765467 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.765537 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.765556 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.765585 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.765605 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.868360 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.868459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.868485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.868516 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.868538 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.972137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.972199 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.972218 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.972244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:42 crc kubenswrapper[4903]: I1126 22:21:42.972265 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:42Z","lastTransitionTime":"2025-11-26T22:21:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.028443 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.028515 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:43 crc kubenswrapper[4903]: E1126 22:21:43.028624 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:43 crc kubenswrapper[4903]: E1126 22:21:43.028765 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.075907 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.075966 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.075984 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.076007 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.076027 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.179687 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.179796 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.179815 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.179844 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.179868 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.282912 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.282985 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.283005 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.283035 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.283055 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.298136 4903 generic.go:334] "Generic (PLEG): container finished" podID="4943d6ca-5152-4ac1-a9d3-850d5a5063b7" containerID="c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa" exitCode=0 Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.298269 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerDied","Data":"c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.298339 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.322820 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.349307 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.372967 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.386095 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.386148 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.386187 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.386208 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.386222 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.398043 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.428824 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb46
97e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.451012 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.471956 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491026 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491021 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 
22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491094 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491270 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.491347 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.516333 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/m
ultus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.534066 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.553593 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.573630 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.589937 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.593404 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.593457 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.593468 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.593488 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.593501 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.612326 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:43Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.696051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.696129 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.696148 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.696183 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.696199 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.800080 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.800168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.800186 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.800213 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.800230 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.903305 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.903370 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.903388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.903414 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:43 crc kubenswrapper[4903]: I1126 22:21:43.903431 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:43Z","lastTransitionTime":"2025-11-26T22:21:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.006414 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.006896 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.006915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.006946 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.006967 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.028076 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:44 crc kubenswrapper[4903]: E1126 22:21:44.028242 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.110030 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.110067 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.110078 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.110096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.110107 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.213264 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.213321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.213339 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.213365 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.213386 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.310335 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" event={"ID":"4943d6ca-5152-4ac1-a9d3-850d5a5063b7","Type":"ContainerStarted","Data":"aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.316369 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.316450 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.316476 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.316505 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.316528 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.335007 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.355354 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.378426 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.401168 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.420837 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.420900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.420917 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.420941 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.420958 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.421505 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.441951 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.462684 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.490931 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.513826 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.524594 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.524664 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.524726 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.524763 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.524787 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.538272 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.571274 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.590136 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.613892 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.627767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.627830 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.627846 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.627871 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.627887 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.637142 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:44Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.731037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 
22:21:44.731098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.731118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.731144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.731161 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.834042 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.834107 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.834124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.834156 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.834174 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.936826 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.936896 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.936915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.936940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:44 crc kubenswrapper[4903]: I1126 22:21:44.936960 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:44Z","lastTransitionTime":"2025-11-26T22:21:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.028289 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.028458 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:45 crc kubenswrapper[4903]: E1126 22:21:45.028541 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:45 crc kubenswrapper[4903]: E1126 22:21:45.028659 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.039312 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.039366 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.039382 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.039409 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.039428 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.143177 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.143237 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.143254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.143278 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.143301 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.246619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.246676 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.246730 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.246756 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.246773 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.321258 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/0.log" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.326597 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e" exitCode=1 Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.326682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.327874 4903 scope.go:117] "RemoveContainer" containerID="8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.349433 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.349518 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.349539 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.349570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.349592 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.372355 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc6
4a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.389926 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.415246 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.436316 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.453095 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.453167 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.453191 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.453222 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.453245 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.461118 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.482090 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.501096 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.518542 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.538307 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.556525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.556575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.556595 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.556619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.556637 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.561983 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.584100 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.603358 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.635224 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.659389 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.659438 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.659451 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.659471 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.659486 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.663816 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:45Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.762017 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.762124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.762145 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.762175 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.762196 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.865216 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.865282 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.865303 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.865330 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.865363 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.968762 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.968828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.968887 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.968932 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:45 crc kubenswrapper[4903]: I1126 22:21:45.968952 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:45Z","lastTransitionTime":"2025-11-26T22:21:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.028663 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:46 crc kubenswrapper[4903]: E1126 22:21:46.028890 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.071971 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.072033 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.072050 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.072083 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.072103 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.174629 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.174673 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.174701 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.174719 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.174732 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.279063 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.279105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.279113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.279126 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.279136 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.332644 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/0.log" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.335441 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.335595 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.356070 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.370519 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.381552 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.381674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.381719 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.381752 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.381773 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.384148 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.397323 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.414555 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.431560 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.447132 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.465935 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.488168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.488234 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc 
kubenswrapper[4903]: I1126 22:21:46.488255 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.488283 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.488302 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.492084 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j"]
Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.493941 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j"
Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.494979 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.497165 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.497235 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.515086 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.531453 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.545260 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.562382 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.575201 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjk2z\" (UniqueName: \"kubernetes.io/projected/18039c76-4e57-465f-9918-e618c823dff7-kube-api-access-zjk2z\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.575245 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.575293 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.575310 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/18039c76-4e57-465f-9918-e618c823dff7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.591239 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.591283 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.591292 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.591332 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.591344 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.594557 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.612641 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.628838 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.648370 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.663600 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.674894 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.676261 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjk2z\" (UniqueName: \"kubernetes.io/projected/18039c76-4e57-465f-9918-e618c823dff7-kube-api-access-zjk2z\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.676312 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 
22:21:46.676371 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.676399 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/18039c76-4e57-465f-9918-e618c823dff7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.677193 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-env-overrides\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.677217 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/18039c76-4e57-465f-9918-e618c823dff7-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.686868 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/18039c76-4e57-465f-9918-e618c823dff7-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.691865 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693408 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693432 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693479 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.693922 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjk2z\" (UniqueName: \"kubernetes.io/projected/18039c76-4e57-465f-9918-e618c823dff7-kube-api-access-zjk2z\") pod \"ovnkube-control-plane-749d76644c-ft62j\" (UID: \"18039c76-4e57-465f-9918-e618c823dff7\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.709638 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.727526 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.745743 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.760102 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.785714 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.796343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.796391 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.796407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.796435 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.796451 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.806888 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.814633 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" Nov 26 22:21:46 crc kubenswrapper[4903]: W1126 22:21:46.835421 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18039c76_4e57_465f_9918_e618c823dff7.slice/crio-13f2b2a4963e3a85de4f7421d47b758f368c74a57492733c7fbed64cc8f4d39f WatchSource:0}: Error finding container 13f2b2a4963e3a85de4f7421d47b758f368c74a57492733c7fbed64cc8f4d39f: Status 404 returned error can't find the container with id 13f2b2a4963e3a85de4f7421d47b758f368c74a57492733c7fbed64cc8f4d39f Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.843296 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.862468 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.885182 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:46Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.901063 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.901145 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.901170 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.901202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:46 crc kubenswrapper[4903]: I1126 22:21:46.901231 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:46Z","lastTransitionTime":"2025-11-26T22:21:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.005158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.005228 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.005246 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.005271 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.005290 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.028022 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.028107 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:47 crc kubenswrapper[4903]: E1126 22:21:47.028236 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:47 crc kubenswrapper[4903]: E1126 22:21:47.028347 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.109450 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.109524 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.109542 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.109571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.109589 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.213941 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.214410 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.214429 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.214456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.214474 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.319116 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.319162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.319180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.319202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.319219 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.342319 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" event={"ID":"18039c76-4e57-465f-9918-e618c823dff7","Type":"ContainerStarted","Data":"eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.342397 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" event={"ID":"18039c76-4e57-465f-9918-e618c823dff7","Type":"ContainerStarted","Data":"13f2b2a4963e3a85de4f7421d47b758f368c74a57492733c7fbed64cc8f4d39f"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.345306 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/1.log" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.346311 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/0.log" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.351396 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58" exitCode=1 Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.351438 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.351533 4903 scope.go:117] "RemoveContainer" containerID="8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.352979 4903 scope.go:117] "RemoveContainer" containerID="b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58" Nov 26 22:21:47 crc kubenswrapper[4903]: E1126 22:21:47.353307 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed 
container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.376834 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator
@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.399001 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.417963 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.427883 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.427954 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.427981 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.428014 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.428041 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.442538 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.463197 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.486840 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.513065 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 
handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.529072 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.531231 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.531297 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.531319 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.531347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.531366 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.545336 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.564859 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.585251 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.610223 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.636407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.636467 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.636484 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.636510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.636529 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.643277 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.659839 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.682828 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:47Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.739521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.739557 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.739565 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.739578 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.739587 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.842987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.843054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.843071 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.843099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.843115 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.946059 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.946124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.946143 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.946178 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:47 crc kubenswrapper[4903]: I1126 22:21:47.946200 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:47Z","lastTransitionTime":"2025-11-26T22:21:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.027766 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.027996 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.049328 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.049389 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.049407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.049438 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.049461 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.153160 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.153225 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.153242 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.153266 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.153283 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.256829 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.256882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.256899 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.256923 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.256939 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.357784 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" event={"ID":"18039c76-4e57-465f-9918-e618c823dff7","Type":"ContainerStarted","Data":"f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.359343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.359390 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.359408 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.359431 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.359448 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.361271 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/1.log"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.380945 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.400762 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.419003 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.441968 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.462894 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.462949 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.462967 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.462992 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.463010 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.464089 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.485053 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.500875 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.520119 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.543263 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.566098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.566484 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.566659 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.566868 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.567007 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.580731 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.607887 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.608096 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.608057593 +0000 UTC m=+53.298292533 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.608355 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.608581 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.608662 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.60864641 +0000 UTC m=+53.298881360 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.613848 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.629948 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.649481 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.667827 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.669841 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.669883 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.669902 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.669930 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.669950 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.692982 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.709414 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709547 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709658 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.709628574 +0000 UTC m=+53.399863524 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.709552 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709766 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709805 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709828 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.709826 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.709910 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.709881391 +0000 UTC m=+53.400116401 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.710057 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.710098 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.710119 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.710220 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.710196249 +0000 UTC m=+53.400431199 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.772611 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.772651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.772662 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.772679 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.772713 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.824810 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-q8dvw"] Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.825403 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:48 crc kubenswrapper[4903]: E1126 22:21:48.825484 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.848466 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.867181 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.875466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.875523 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.875541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.875570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.875590 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.881744 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.895119 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.915649 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.934646 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.960336 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 
handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.973796 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.977625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.977663 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.977675 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.977703 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.977713 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:48Z","lastTransitionTime":"2025-11-26T22:21:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.984467 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:48 crc kubenswrapper[4903]: I1126 22:21:48.999199 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:48Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.008307 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.012373 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2k42\" (UniqueName: \"kubernetes.io/projected/aef28737-00fd-4738-ae1f-e02a5b974905-kube-api-access-s2k42\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.012454 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.019598 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\
",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.027915 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.028033 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.027927 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.028191 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.037653 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.051719 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.062455 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.079390 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-co
nfig/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.079950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.080018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.080041 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.080075 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.080099 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.113652 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2k42\" (UniqueName: \"kubernetes.io/projected/aef28737-00fd-4738-ae1f-e02a5b974905-kube-api-access-s2k42\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.113795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.114002 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.114091 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:49.614069404 +0000 UTC m=+38.304304344 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.144827 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2k42\" (UniqueName: \"kubernetes.io/projected/aef28737-00fd-4738-ae1f-e02a5b974905-kube-api-access-s2k42\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.163668 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.163756 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.163774 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.163799 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.163817 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.186018 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.191603 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.191827 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.191968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.192130 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.192272 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.212644 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.217832 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.217899 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.217924 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.217952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.217970 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.236743 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.241575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.241755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.241777 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.241805 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.241822 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.262552 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.271768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.271969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.272090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.272235 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.272395 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.295398 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:49Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.295960 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.298139 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.298233 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.298252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.298278 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.298296 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.401417 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.401483 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.401500 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.401524 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.401543 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.504551 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.504610 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.504626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.504651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.504668 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.607984 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.608044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.608060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.608083 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.608100 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.619514 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.619740 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:49 crc kubenswrapper[4903]: E1126 22:21:49.619822 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:50.619800476 +0000 UTC m=+39.310035416 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.711847 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.711894 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.711911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.711936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.711955 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.815732 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.815790 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.815809 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.815831 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.815847 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.919227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.919298 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.919316 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.919342 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:49 crc kubenswrapper[4903]: I1126 22:21:49.919360 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:49Z","lastTransitionTime":"2025-11-26T22:21:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.023008 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.023081 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.023098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.023123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.023140 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.027938 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:50 crc kubenswrapper[4903]: E1126 22:21:50.028140 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.126679 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.126771 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.126794 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.126817 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.126834 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.152352 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.172583 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-
zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.191506 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.211021 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.228514 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.230079 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.230140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.230158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.230181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.230198 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.250404 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.274193 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb2950603
6529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.290591 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.315483 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.333509 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.333561 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc 
kubenswrapper[4903]: I1126 22:21:50.333580 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.333611 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.333662 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.345948 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.362866 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.381088 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.398989 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.419205 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.436318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.436388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.436449 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.436485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.436513 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.437411 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.454285 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.473764 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:50Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.539247 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.539308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.539329 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.539356 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.539375 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.630190 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:50 crc kubenswrapper[4903]: E1126 22:21:50.630350 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:50 crc kubenswrapper[4903]: E1126 22:21:50.630456 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:52.630431007 +0000 UTC m=+41.320665957 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.642036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.642089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.642106 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.642131 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.642147 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.745283 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.745344 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.745361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.745387 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.745405 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.848381 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.848456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.848484 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.848510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.848531 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.951611 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.951674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.951723 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.951757 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:50 crc kubenswrapper[4903]: I1126 22:21:50.951775 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:50Z","lastTransitionTime":"2025-11-26T22:21:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.027965 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:51 crc kubenswrapper[4903]: E1126 22:21:51.028127 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.028186 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.028215 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:51 crc kubenswrapper[4903]: E1126 22:21:51.028325 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:51 crc kubenswrapper[4903]: E1126 22:21:51.028562 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.055037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.055091 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.055111 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.055136 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.055155 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.158197 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.158253 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.158272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.158298 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.158321 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.260816 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.260867 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.260884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.260906 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.260922 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.364084 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.364144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.364162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.364185 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.364204 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.466842 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.466884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.466900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.466925 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.466941 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.570027 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.570076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.570092 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.570115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.570131 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.673097 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.673166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.673184 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.673208 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.673227 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.775925 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.776004 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.776027 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.776057 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.776080 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.879297 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.879355 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.879371 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.879393 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.879409 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.982495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.982568 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.982586 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.982616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:51 crc kubenswrapper[4903]: I1126 22:21:51.982639 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:51Z","lastTransitionTime":"2025-11-26T22:21:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.028305 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:21:52 crc kubenswrapper[4903]: E1126 22:21:52.028499 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.048058 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.069598 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.085664 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.085760 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.085779 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.085802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.085819 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.088612 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.107784 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.129039 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.157130 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.180873 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.188485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.188553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc 
kubenswrapper[4903]: I1126 22:21:52.188579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.188609 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.188632 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.212641 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a1fc877cd84029ff6db683fa5dbc1093f04cb4697e1982d2d0b6ff227a4fc3e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"message\\\":\\\"4\\\\nI1126 22:21:44.358183 6152 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:44.358202 6152 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:21:44.358214 6152 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:21:44.358205 6152 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 22:21:44.358264 6152 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:44.358266 6152 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:21:44.358294 6152 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:44.358330 6152 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:44.358333 6152 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:44.358362 6152 factory.go:656] Stopping watch factory\\\\nI1126 22:21:44.358382 6152 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:21:44.358403 6152 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.234743 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.252822 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.269834 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.292020 4903 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.292087 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.292109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.292143 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.292168 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.299148 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.319066 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.339842 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.356593 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.376323 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.395030 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.395333 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.395566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.395785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.395954 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.499758 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.499825 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.499842 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.499866 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.499883 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.602919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.602986 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.603003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.603028 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.603046 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.649230 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:52 crc kubenswrapper[4903]: E1126 22:21:52.649450 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:52 crc kubenswrapper[4903]: E1126 22:21:52.649597 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:21:56.649560461 +0000 UTC m=+45.339795441 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.706316 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.706411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.706430 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.706456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.706474 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.809827 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.809914 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.809937 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.809969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.809992 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.913088 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.913151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.913168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.913192 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:52 crc kubenswrapper[4903]: I1126 22:21:52.913212 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:52Z","lastTransitionTime":"2025-11-26T22:21:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.020103 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.020170 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.020187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.020211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.020229 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.027595 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.027622 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:53 crc kubenswrapper[4903]: E1126 22:21:53.027834 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.027856 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:21:53 crc kubenswrapper[4903]: E1126 22:21:53.028028 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:21:53 crc kubenswrapper[4903]: E1126 22:21:53.028201 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.123202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.123239 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.123247 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.123262 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.123272 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.227012 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.227073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.227090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.227115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.227132 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.330399 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.330478 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.330502 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.330564 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.330590 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.433882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.433949 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.433971 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.434001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.434021 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.536318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.536353 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.536363 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.536376 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.536385 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.639438 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.639482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.639495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.639513 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.639525 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.742844 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.742904 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.742919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.742939 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.742951 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.845566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.845637 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.845651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.845667 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.845680 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.948775 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.948841 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.948861 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.948886 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:53 crc kubenswrapper[4903]: I1126 22:21:53.948904 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:53Z","lastTransitionTime":"2025-11-26T22:21:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.028491 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:54 crc kubenswrapper[4903]: E1126 22:21:54.028657 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.051483 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.051541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.051560 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.051589 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.051610 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.155892 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.155965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.155994 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.156042 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.156065 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.258887 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.259221 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.259438 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.259654 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.259918 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.363602 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.363739 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.363766 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.363800 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.363823 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.467580 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.467663 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.467687 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.467762 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.467787 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.571565 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.571624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.571643 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.571671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.571854 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.674775 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.674848 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.674866 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.674896 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.674916 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.778304 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.778367 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.778383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.778415 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.778435 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.882011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.882110 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.882143 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.882181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.882203 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.986460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.986565 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.986589 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.986626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:54 crc kubenswrapper[4903]: I1126 22:21:54.986649 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:54Z","lastTransitionTime":"2025-11-26T22:21:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.027582 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.027651 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:21:55 crc kubenswrapper[4903]: E1126 22:21:55.027766 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.027661 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:21:55 crc kubenswrapper[4903]: E1126 22:21:55.027912 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:21:55 crc kubenswrapper[4903]: E1126 22:21:55.028013 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.089637 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.089746 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.089771 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.089801 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.089821 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.193173 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.193279 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.193306 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.193346 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.193373 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.297969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.298051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.298073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.298119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.298138 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.401550 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.401647 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.401666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.401735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.401761 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.505223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.505312 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.505332 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.505361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.505383 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.609102 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.609180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.609199 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.609230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.609250 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.711506 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.711579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.711599 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.711625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.711643 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.815339 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.815436 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.815455 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.815488 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.815508 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.918268 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.918347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.918365 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.918390 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:55 crc kubenswrapper[4903]: I1126 22:21:55.918407 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:55Z","lastTransitionTime":"2025-11-26T22:21:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.021855 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.021922 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.021945 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.021970 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.021987 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.028245 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:56 crc kubenswrapper[4903]: E1126 22:21:56.028477 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.124892 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.124962 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.124981 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.125010 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.125028 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.228656 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.228784 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.228808 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.228832 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.228852 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.332421 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.332521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.332544 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.332572 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.332594 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.435541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.435609 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.435626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.435650 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.435667 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.543872 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.543926 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.543943 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.543966 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.543982 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.647112 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.647167 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.647188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.647211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.647229 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.712512 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:21:56 crc kubenswrapper[4903]: E1126 22:21:56.712774 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 22:21:56 crc kubenswrapper[4903]: E1126 22:21:56.712899 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:04.712868548 +0000 UTC m=+53.403103498 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.749958 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.750272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.750470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.750636 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.750843 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.853422 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.853518 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.853535 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.853559 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.853575 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.956311 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.956366 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.956384 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.956409 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:56 crc kubenswrapper[4903]: I1126 22:21:56.956426 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:56Z","lastTransitionTime":"2025-11-26T22:21:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.028423 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.028468 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.028424 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:21:57 crc kubenswrapper[4903]: E1126 22:21:57.028584 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:21:57 crc kubenswrapper[4903]: E1126 22:21:57.028782 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:21:57 crc kubenswrapper[4903]: E1126 22:21:57.029046 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.059835 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.059893 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.059910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.059933 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.059950 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.163254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.163323 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.163348 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.163376 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.163397 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.266564 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.266651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.266790 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.266835 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.266859 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.370190 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.370272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.370310 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.370343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.370367 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.474018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.474076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.474092 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.474117 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.474134 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.577594 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.577671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.577695 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.577777 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.577805 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.680360 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.680425 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.680441 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.680465 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.680481 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.783653 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.783743 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.783760 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.783784 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.783800 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.885909 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.885956 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.885972 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.885993 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.886009 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.989960 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.990018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.990035 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.990058 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:57 crc kubenswrapper[4903]: I1126 22:21:57.990078 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:57Z","lastTransitionTime":"2025-11-26T22:21:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.027447 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:21:58 crc kubenswrapper[4903]: E1126 22:21:58.027610 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.093331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.093365 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.093373 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.093387 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.093397 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.196626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.196731 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.196749 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.196771 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.196787 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.301101 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.301172 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.301190 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.301217 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.301236 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.404292 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.404362 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.404380 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.404404 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.404421 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.506614 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.506688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.506744 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.506769 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.506786 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.609427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.609501 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.609555 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.609579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.609597 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.712399 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.712474 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.712499 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.712528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.712548 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.816023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.816067 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.816085 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.816109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.816126 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.922541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.923139 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.923164 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.923189 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:58 crc kubenswrapper[4903]: I1126 22:21:58.923207 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:58Z","lastTransitionTime":"2025-11-26T22:21:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.026439 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.026504 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.026523 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.026548 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.026568 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.027713 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.027757 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.027743 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.027883 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.028308 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.028456 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.028650 4903 scope.go:117] "RemoveContainer" containerID="b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.046056 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\
"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.062552 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168
.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.081007 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.108936 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.116311 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.130383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.130486 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.130510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.130542 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.130563 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.132193 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.152897 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.169292 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.191004 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.216357 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.233849 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.234279 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc 
kubenswrapper[4903]: I1126 22:21:59.234460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.234873 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.235060 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.255198 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a
9edd73fdddf49a45220a9d58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.274978 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.300137 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.322831 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.340983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.341018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.341033 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.341051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.341066 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.341227 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.357954 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.373154 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.408885 4903 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/1.log" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.411576 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.412153 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.424494 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.444425 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.444471 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.444485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.444504 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.444515 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.449841 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 
22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 
22:21:59.468552 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"
containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.487114 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.508640 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.527251 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30
cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.546474 4903 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.546514 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.546526 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.546543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.546555 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.548612 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.560083 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.575922 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.587510 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.597931 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.609637 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.619896 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.629111 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.641279 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 
22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.644923 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.644959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.644968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.644982 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.644992 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.654149 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z" Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.655877 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 
2025-08-24T17:21:41Z"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.659597 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.659623 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.659634 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.659646 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.659654 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.677784 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to previous attempt; elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.681671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.681734 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.681746 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.681765 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.681776 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.700686 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to previous attempt; elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.704528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.704607 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.704626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.704657 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.704675 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.725440 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to previous attempt; elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.729546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.729590 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.729606 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.729630 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.729647 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.744968 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to previous attempt; elided] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:21:59Z is after 2025-08-24T17:21:41Z"
Nov 26 22:21:59 crc kubenswrapper[4903]: E1126 22:21:59.745115 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.747066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.747146 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.747158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.747176 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.747192 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.849595 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.849638 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.849647 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.849661 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.849672 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.952093 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.952137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.952149 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.952166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:21:59 crc kubenswrapper[4903]: I1126 22:21:59.952179 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:21:59Z","lastTransitionTime":"2025-11-26T22:21:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.028395 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:00 crc kubenswrapper[4903]: E1126 22:22:00.028587 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.054123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.054191 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.054211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.054235 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.054252 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.156562 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.156632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.156654 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.156678 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.156703 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.259377 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.259443 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.259466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.259495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.259518 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.363405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.363480 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.363504 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.363538 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.363560 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.417909 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/2.log" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.418912 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/1.log" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.423260 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" exitCode=1 Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.423304 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.423374 4903 scope.go:117] "RemoveContainer" containerID="b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.424425 4903 scope.go:117] "RemoveContainer" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" Nov 26 22:22:00 crc kubenswrapper[4903]: E1126 22:22:00.424857 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.446791 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467025 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467128 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467165 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467190 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.467870 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.487016 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.513928 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.535681 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.552890 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.570356 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.570413 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.570431 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.570459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.570478 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.571276 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.588147 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.607771 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' 
detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.626605 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.641241 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.658271 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.673159 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.673207 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.673224 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.673248 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.673265 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.678239 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.707207 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b780bb903b5642f0031c8419663bc2d18ac2bf8a9edd73fdddf49a45220a9d58\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"message\\\":\\\"2:21:46.595146 6353 factory.go:656] Stopping watch factory\\\\nI1126 22:21:46.595252 6353 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 22:21:46.595173 6353 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:21:46.595310 6353 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:21:46.595189 6353 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:21:46.595320 6353 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595338 6353 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:21:46.595230 6353 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:21:46.595364 6353 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:21:46.595496 6353 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.595947 6353 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 22:21:46.596152 6353 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 22:21:46.596282 6353 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 
controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.1
26.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.721456 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.747120 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:00Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.776296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.776340 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.776359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.776387 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.776405 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.879838 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.879908 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.879927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.879952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.879970 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.982558 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.982608 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.982625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.982648 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:00 crc kubenswrapper[4903]: I1126 22:22:00.982666 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:00Z","lastTransitionTime":"2025-11-26T22:22:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.028153 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.028229 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.028269 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:01 crc kubenswrapper[4903]: E1126 22:22:01.028403 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:01 crc kubenswrapper[4903]: E1126 22:22:01.028542 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:01 crc kubenswrapper[4903]: E1126 22:22:01.028686 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.085940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.085968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.085978 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.085991 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.086022 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.189806 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.189852 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.189864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.189882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.189893 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.293164 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.293229 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.293246 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.293269 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.293286 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.396915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.396972 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.396989 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.397013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.397034 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.429391 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/2.log" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.434925 4903 scope.go:117] "RemoveContainer" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" Nov 26 22:22:01 crc kubenswrapper[4903]: E1126 22:22:01.435192 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.456214 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.475730 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.496392 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.499880 4903 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.499943 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.499963 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.499991 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.500010 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.517050 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.536247 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.555395 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.571556 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.588370 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.603130 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.603185 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.603200 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.603221 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.603236 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.610994 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.630821 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.646611 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.662637 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.684034 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.704546 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.706852 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.706911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.706945 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.706977 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.706996 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.735784 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.768524 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.810685 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.810784 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.810810 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.810840 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.810862 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.913840 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.913910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.913928 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.913953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:01 crc kubenswrapper[4903]: I1126 22:22:01.913972 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:01Z","lastTransitionTime":"2025-11-26T22:22:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.017684 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.017774 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.017790 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.017814 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.017831 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.027743 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:02 crc kubenswrapper[4903]: E1126 22:22:02.027964 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.052234 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.074157 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.089771 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.109196 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.120747 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.120823 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.120847 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.120881 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.120906 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.133572 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.160947 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.176400 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.198225 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.218164 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.223605 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.223641 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.223650 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.223667 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.223685 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.235516 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.250912 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.266467 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.282281 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.298296 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.321796 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.326817 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.326888 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.326912 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.326944 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.326967 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.341659 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:02Z is after 2025-08-24T17:21:41Z"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.430202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.430272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.430299 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.430332 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.430355 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.534376 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.534457 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.534480 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.534507 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.534524 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.637477 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.637539 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.637557 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.637579 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.637596 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.740995 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.741060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.741089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.741118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.741140 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.844142 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.844196 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.844213 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.844236 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.844253 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.947115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.947184 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.947201 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.947228 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:02 crc kubenswrapper[4903]: I1126 22:22:02.947245 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:02Z","lastTransitionTime":"2025-11-26T22:22:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.027772 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.027800 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.027800 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:03 crc kubenswrapper[4903]: E1126 22:22:03.028104 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:03 crc kubenswrapper[4903]: E1126 22:22:03.028250 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:03 crc kubenswrapper[4903]: E1126 22:22:03.028410 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.051434 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.051517 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.051536 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.051561 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.051580 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.154643 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.154747 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.154772 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.154801 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.154827 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.258246 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.258304 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.258321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.258345 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.258362 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.361195 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.361257 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.361275 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.361302 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.361320 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.464096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.464155 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.464170 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.464192 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.464209 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.516799 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.529508 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.535477 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.552768 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.568950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.569018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.569036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.569068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.569090 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.574439 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.589973 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.612856 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.629461 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.644112 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.666542 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.671596 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.671641 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:03 crc 
kubenswrapper[4903]: I1126 22:22:03.671659 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.671685 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.671731 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.696079 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.713352 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.731975 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.750013 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.770206 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.775987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.776048 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.776066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.776090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.776107 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.786035 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.803542 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.823940 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
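Every status patch above is rejected for the same reason: the network-node-identity webhook's serving certificate expired on 2025-08-24T17:21:41Z, so each TLS handshake to https://127.0.0.1:9743 fails closed. A minimal Go sketch of the validity-window check behind this x509 error (illustrative only; the log shows the /etc/webhook-cert/ mount point, but the tls.crt filename is an assumption):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; the log only shows the webhook-cert volume mounted at /etc/webhook-cert/.
	data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// The same window test crypto/x509 applies during verification; in the log,
	// "current time ... is after 2025-08-24T17:21:41Z" means now.After(cert.NotAfter) held.
	now := time.Now()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}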
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:03Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.879874 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.879935 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.879953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.879983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.880002 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.982839 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.982910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.982927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.982951 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:03 crc kubenswrapper[4903]: I1126 22:22:03.982968 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:03Z","lastTransitionTime":"2025-11-26T22:22:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.027991 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.028187 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.091245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.091354 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.091417 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.091441 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.091496 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.194022 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.194103 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.194127 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.194158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.194180 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.298235 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.298322 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.298341 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.298366 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.298386 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.401606 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.401666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.401684 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.401747 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.401769 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.505318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.505383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.505408 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.505437 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.505456 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.609428 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.609500 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.609521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.609553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.609575 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.652819 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.653014 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.653074 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:22:36.653028472 +0000 UTC m=+85.343263422 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.653175 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.653265 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:36.653241237 +0000 UTC m=+85.343476177 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
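The "No retries permitted until ..." entries show the volume manager's doubling retry delay: the metrics-certs mount below is deferred 16s while the other operations are already at 32s (durationBeforeRetry). A small Go sketch of such a capped doubling backoff (constants are illustrative, not kubelet's actual configuration):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed parameters for illustration; attempts 6 and 7 land on the
	// 16s and 32s delays visible in the surrounding log entries.
	const (
		initial = 500 * time.Millisecond
		factor  = 2
		ceiling = 2 * time.Minute
	)
	d := initial
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: wait %v before retry\n", attempt, d)
		d *= factor // double the delay after each failure
		if d > ceiling {
			d = ceiling // never wait longer than the cap
		}
	}
}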
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.712851 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.712915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.712936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.712965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.712987 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.753743 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.753819 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.753882 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.753916 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.753924 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754014 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:36.753989985 +0000 UTC m=+85.444224925 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754055 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754167 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:20.754139289 +0000 UTC m=+69.444374409 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754061 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754228 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754248 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754307 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:36.754293123 +0000 UTC m=+85.444528073 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754379 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754454 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754481 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:04 crc kubenswrapper[4903]: E1126 22:22:04.754673 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:36.754631252 +0000 UTC m=+85.444866342 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.817326 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.817446 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.817503 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.817537 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.817592 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
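With three failure classes interleaved here (expired webhook certificate, missing CNI configuration, and "not registered" secrets, configmaps, and projected service-account volumes), a quick tally helps separate root cause from noise. A hypothetical Go triage sketch that counts each class in a log like this one (reads the log on stdin; the class names and patterns are the editor's, not kubelet's):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Assumed patterns matching the three recurring error shapes in this section.
	classes := map[string]*regexp.Regexp{
		"webhook cert expired":  regexp.MustCompile(`certificate has expired or is not yet valid`),
		"CNI config missing":    regexp.MustCompile(`no CNI configuration file`),
		"object not registered": regexp.MustCompile(`not registered`),
	}
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	// Entries in this log far exceed the scanner's default 64 KiB token size.
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		for name, re := range classes {
			if re.MatchString(sc.Text()) {
				counts[name]++
			}
		}
	}
	for name, n := range counts {
		fmt.Printf("%-22s %d\n", name, n)
	}
}

Usage would be along the lines of `go run triage.go < kubelet.log`.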
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.921116 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.921188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.921214 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.921244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:04 crc kubenswrapper[4903]: I1126 22:22:04.921264 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:04Z","lastTransitionTime":"2025-11-26T22:22:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.025473 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.025553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.025575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.025656 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.025684 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.027831 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.027896 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.028293 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:05 crc kubenswrapper[4903]: E1126 22:22:05.028316 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:05 crc kubenswrapper[4903]: E1126 22:22:05.028411 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:05 crc kubenswrapper[4903]: E1126 22:22:05.028579 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.129828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.129902 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.129926 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.129953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.129972 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.232920 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.232982 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.233005 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.233035 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.233058 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.336163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.336223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.336244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.336267 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.336284 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.440606 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.441107 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.441166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.441194 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.441249 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.545959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.546028 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.546044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.546069 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.546085 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.648539 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.648586 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.648604 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.648627 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.648646 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.751959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.752011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.752027 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.752053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.752072 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.855153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.855236 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.855259 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.855290 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.855313 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.958348 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.958411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.958428 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.958454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:05 crc kubenswrapper[4903]: I1126 22:22:05.958471 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:05Z","lastTransitionTime":"2025-11-26T22:22:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.028002 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:06 crc kubenswrapper[4903]: E1126 22:22:06.028187 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.061872 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.061930 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.061949 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.061973 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.061991 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.165074 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.165140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.165198 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.165224 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.165241 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.268587 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.268726 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.268748 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.268772 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.268796 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.372305 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.372454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.372523 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.372596 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.372624 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.476647 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.476762 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.476778 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.476802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.476819 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.584995 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.585048 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.585068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.585102 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.585121 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.689014 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.689099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.689153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.689185 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.689202 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.792517 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.792574 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.792591 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.792613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.792629 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.895737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.895802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.895821 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.895845 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.895863 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.998967 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.999034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.999051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.999076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:06 crc kubenswrapper[4903]: I1126 22:22:06.999093 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:06Z","lastTransitionTime":"2025-11-26T22:22:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.027428 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.027531 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.027441 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:07 crc kubenswrapper[4903]: E1126 22:22:07.027610 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:07 crc kubenswrapper[4903]: E1126 22:22:07.027758 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:07 crc kubenswrapper[4903]: E1126 22:22:07.027908 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.102368 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.102437 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.102455 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.102483 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.102501 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.205448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.205514 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.205532 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.205556 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.205574 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.308657 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.308775 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.308801 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.308834 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.308852 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.411768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.411851 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.411878 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.411907 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.411929 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.515148 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.515209 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.515227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.515252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.515272 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.618295 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.618362 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.618420 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.618452 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.618474 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.721891 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.721961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.721976 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.722001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.722020 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.825011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.825091 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.825114 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.825144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.825169 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.929797 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.929850 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.929862 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.929878 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:07 crc kubenswrapper[4903]: I1126 22:22:07.929889 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:07Z","lastTransitionTime":"2025-11-26T22:22:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.027999 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:08 crc kubenswrapper[4903]: E1126 22:22:08.028219 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.033561 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.033612 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.033636 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.033672 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.033733 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.136880 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.136951 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.136977 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.137009 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.137029 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.239914 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.239983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.240016 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.240040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.240061 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.343460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.343526 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.343548 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.343578 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.343601 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.447571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.447804 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.447836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.447864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.447888 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.551187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.551272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.551290 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.551319 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.551337 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.654838 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.654908 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.654924 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.654947 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.654965 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.758262 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.758346 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.758373 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.758398 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.758417 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.861759 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.861843 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.861866 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.861891 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.861907 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.964872 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.964945 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.964962 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.964989 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:08 crc kubenswrapper[4903]: I1126 22:22:08.965007 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:08Z","lastTransitionTime":"2025-11-26T22:22:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.028283 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.028304 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.028394 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.028731 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.028843 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.028967 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.068656 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.068773 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.068798 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.068828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.068854 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.171343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.171411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.171435 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.171465 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.171487 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.273884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.273948 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.273985 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.274016 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.274035 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.377267 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.377339 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.377363 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.377392 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.377413 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.479735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.479785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.479803 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.479826 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.479845 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.582427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.582469 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.582487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.582507 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.582523 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.685074 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.685138 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.685161 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.685192 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.685215 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.787735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.787798 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.787820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.787846 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.787863 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.797390 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.797467 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.797487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.797520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.797543 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.819100 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:09Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.824120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.824187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.824204 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.824231 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.824248 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.844192 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:09Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.849170 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.849230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.849254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.849286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.849309 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.869223 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:09Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.874618 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.874688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.874737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.874762 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.874781 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.894262 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:09Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.900065 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.900140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.900158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.900183 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.900201 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.923475 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:09Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:09 crc kubenswrapper[4903]: E1126 22:22:09.923724 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.926001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
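All four status-patch attempts above fail identically: the kubelet's PATCH of the node status is rejected by the API server because the admission call to the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 fails TLS verification, the webhook's serving certificate having expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-26T22:22:09Z; once the retry budget is spent the kubelet gives up with "update node status exceeds retry count". A minimal diagnostic sketch, assuming the webhook endpoint taken from the log is reachable from the node and that the third-party cryptography package (>= 42) is installed, to confirm the certificate's validity window independently of the kubelet:

```python
# Hypothetical diagnostic, not part of the log: print the validity window of the
# certificate served on the webhook port named in the error above. Host and port
# come from the log; the `cryptography` dependency is an assumption.
import socket
import ssl
from datetime import datetime, timezone

from cryptography import x509  # third-party; pip install cryptography

HOST, PORT = "127.0.0.1", 9743  # from Post "https://127.0.0.1:9743/node?timeout=10s"

ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE  # skip verification so the expired cert can be fetched

with socket.create_connection((HOST, PORT), timeout=5) as sock:
    with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
        der = tls.getpeercert(binary_form=True)  # raw DER bytes of the leaf cert

cert = x509.load_der_x509_certificate(der)
now = datetime.now(timezone.utc)
print("subject:  ", cert.subject.rfc4514_string())
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", cert.not_valid_after_utc)  # expect 2025-08-24 17:21:41+00:00
print("expired:  ", now > cert.not_valid_after_utc)
```

Disabling verification is what makes this inspection possible at all: with CERT_NONE the handshake completes even though the leaf is expired, which is exactly the check that fails when the API server verifies the chain.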
event="NodeHasSufficientMemory" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.926071 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.926094 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.926124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:09 crc kubenswrapper[4903]: I1126 22:22:09.926142 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:09Z","lastTransitionTime":"2025-11-26T22:22:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.027665 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:10 crc kubenswrapper[4903]: E1126 22:22:10.027902 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.029072 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.029151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.029173 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.029203 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.029225 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.132196 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.132252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.132269 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.132291 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.132309 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.234593 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.234674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.234687 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.234734 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.234746 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.337962 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.338040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.338063 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.338097 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.338120 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.441181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.441239 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.441258 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.441286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.441304 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.544044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.544088 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.544105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.544127 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.544143 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.647225 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.647288 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.647308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.647335 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.647354 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.750584 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.750644 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.750661 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.750724 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.750744 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.852750 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.852792 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.852800 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.852814 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.852824 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.955768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.955824 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.955841 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.955863 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:10 crc kubenswrapper[4903]: I1126 22:22:10.955880 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:10Z","lastTransitionTime":"2025-11-26T22:22:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.028447 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.028535 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.028664 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:11 crc kubenswrapper[4903]: E1126 22:22:11.028648 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:11 crc kubenswrapper[4903]: E1126 22:22:11.028919 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:11 crc kubenswrapper[4903]: E1126 22:22:11.028987 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.060153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.060234 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.060257 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.060288 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.060309 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.164334 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.164429 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.164450 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.164484 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.164508 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.267075 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.267129 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.267147 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.267203 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.267221 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.369896 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.369974 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.370002 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.370031 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.370050 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.473219 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.473290 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.473307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.473333 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.473353 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.575755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.575800 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.575809 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.575823 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.575832 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.678578 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.678646 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.678669 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.678745 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.678770 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.782059 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.782132 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.782156 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.782187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.782211 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.885150 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.885203 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.885220 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.885243 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.885260 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.988443 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.988508 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.988525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.988552 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:11 crc kubenswrapper[4903]: I1126 22:22:11.988570 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:11Z","lastTransitionTime":"2025-11-26T22:22:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.027789 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:12 crc kubenswrapper[4903]: E1126 22:22:12.028006 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.046042 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.067174 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.083216 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.093973 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.094034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.094087 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.094117 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.094136 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.097732 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.114839 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.136522 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.155566 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.176453 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.197056 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.197114 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.197133 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.197157 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.197176 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.199136 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.216729 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.237411 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.267110 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.281423 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.300576 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.301040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.301105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.301125 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.301153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.301172 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.318585 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.334166 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.356336 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:12Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.403959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.404001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.404017 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.404040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.404057 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.507723 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.507767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.507802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.507820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.507833 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.611201 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.611243 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.611252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.611271 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.611281 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.714745 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.715296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.715318 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.715352 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.715375 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.818589 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.818653 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.818671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.818730 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.818748 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.921975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.922043 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.922061 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.922090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:12 crc kubenswrapper[4903]: I1126 22:22:12.922108 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:12Z","lastTransitionTime":"2025-11-26T22:22:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.024991 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.025058 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.025077 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.025102 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.025121 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:13Z","lastTransitionTime":"2025-11-26T22:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.027469 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:13 crc kubenswrapper[4903]: E1126 22:22:13.027760 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.027900 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:13 crc kubenswrapper[4903]: E1126 22:22:13.028040 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.028136 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:13 crc kubenswrapper[4903]: E1126 22:22:13.028249 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.127997 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.128051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.128070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.128095 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.128115 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:13Z","lastTransitionTime":"2025-11-26T22:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.231060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.231109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.231126 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.231151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:13 crc kubenswrapper[4903]: I1126 22:22:13.231168 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:13Z","lastTransitionTime":"2025-11-26T22:22:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.028509 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:14 crc kubenswrapper[4903]: E1126 22:22:14.028787 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.075106 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.075162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.075182 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.075207 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.075225 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.178521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.178565 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.178581 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.178601 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.178616 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.281171 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.281221 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.281240 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.281266 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.281284 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.385335 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.385405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.385427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.385456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.385478 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.488664 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.488798 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.488820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.488849 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.488872 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.591873 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.591936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.591958 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.591988 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.592010 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.694221 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.694268 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.694285 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.694307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.694323 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.796463 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.796508 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.796527 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.796548 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:14 crc kubenswrapper[4903]: I1126 22:22:14.796564 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:14Z","lastTransitionTime":"2025-11-26T22:22:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.003385 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.003451 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.003470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.003496 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.003515 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:15Z","lastTransitionTime":"2025-11-26T22:22:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.028366 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:15 crc kubenswrapper[4903]: E1126 22:22:15.028567 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.028917 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:15 crc kubenswrapper[4903]: E1126 22:22:15.029062 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:15 crc kubenswrapper[4903]: I1126 22:22:15.029334 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:15 crc kubenswrapper[4903]: E1126 22:22:15.029485 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.027788 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:16 crc kubenswrapper[4903]: E1126 22:22:16.028278 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.028782 4903 scope.go:117] "RemoveContainer" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" Nov 26 22:22:16 crc kubenswrapper[4903]: E1126 22:22:16.029004 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.035129 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.035182 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.035205 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.035229 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.035245 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.137270 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.137305 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.137313 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.137328 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.137338 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.240212 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.240316 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.240335 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.240403 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.240424 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.343112 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.343150 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.343160 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.343176 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.343188 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.446003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.446066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.446089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.446118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.446141 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.548876 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.548943 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.548961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.548987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.549003 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.651492 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.651549 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.651566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.651592 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.651609 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.754453 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.754495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.754504 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.754519 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:16 crc kubenswrapper[4903]: I1126 22:22:16.754532 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:16Z","lastTransitionTime":"2025-11-26T22:22:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.027585 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.027630 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:17 crc kubenswrapper[4903]: E1126 22:22:17.027716 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:17 crc kubenswrapper[4903]: E1126 22:22:17.027814 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.027585 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:17 crc kubenswrapper[4903]: E1126 22:22:17.027882 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.063992 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.064054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.064072 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.064096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:17 crc kubenswrapper[4903]: I1126 22:22:17.064112 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:17Z","lastTransitionTime":"2025-11-26T22:22:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.028378 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:18 crc kubenswrapper[4903]: E1126 22:22:18.028563 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.091308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.091353 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.091366 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.091383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.091394 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:18Z","lastTransitionTime":"2025-11-26T22:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.812477 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.812533 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.812542 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.812558 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.812568 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:18Z","lastTransitionTime":"2025-11-26T22:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.915742 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.915785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.915795 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.915815 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:18 crc kubenswrapper[4903]: I1126 22:22:18.915827 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:18Z","lastTransitionTime":"2025-11-26T22:22:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.019033 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.019076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.019088 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.019108 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.019121 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.027920 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.027956 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:19 crc kubenswrapper[4903]: E1126 22:22:19.028056 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.028173 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:19 crc kubenswrapper[4903]: E1126 22:22:19.028323 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:19 crc kubenswrapper[4903]: E1126 22:22:19.028389 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.122123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.122168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.122181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.122198 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.122209 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.225325 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.225388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.225409 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.225440 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.225460 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.328367 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.328411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.328423 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.328441 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.328455 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.431916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.432025 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.432049 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.432080 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.432102 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.534774 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.534865 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.534885 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.534910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.534927 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.637850 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.637902 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.637916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.637933 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.637943 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.740748 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.740789 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.740798 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.740814 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.740823 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.843361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.843401 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.843414 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.843432 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.843444 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.946180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.946238 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.946250 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.946272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:19 crc kubenswrapper[4903]: I1126 22:22:19.946288 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:19Z","lastTransitionTime":"2025-11-26T22:22:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.028247 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.028409 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.048570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.048644 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.048664 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.048737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.048765 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.059846 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.059916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.059934 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.059959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.059976 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.075204 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:20Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.079583 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.079616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.079642 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.079659 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.079668 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.095092 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:20Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.098244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.098265 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.098273 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.098287 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.098298 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.110332 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:20Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.113665 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.113755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.113767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.113807 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.113830 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.125328 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:20Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.128857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.128912 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.128929 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.128952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.128964 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.148025 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:20Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.148186 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.151333 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.151364 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.151377 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.151396 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.151410 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.254017 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.254069 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.254082 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.254100 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.254112 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.356873 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.356920 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.356938 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.356962 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.357173 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.460066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.460109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.460125 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.460147 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.460166 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.562841 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.562880 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.562897 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.562919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.562934 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.665608 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.665668 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.665685 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.665741 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.665759 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.768668 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.768769 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.768789 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.768818 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.768838 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.844671 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.845007 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:22:20 crc kubenswrapper[4903]: E1126 22:22:20.845205 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:22:52.845156494 +0000 UTC m=+101.535391594 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.872026 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.872134 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.872153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.872184 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.872205 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.975570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.975621 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.975642 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.975666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:20 crc kubenswrapper[4903]: I1126 22:22:20.975720 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:20Z","lastTransitionTime":"2025-11-26T22:22:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.027595 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.027664 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:21 crc kubenswrapper[4903]: E1126 22:22:21.027859 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.028068 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:21 crc kubenswrapper[4903]: E1126 22:22:21.028108 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:21 crc kubenswrapper[4903]: E1126 22:22:21.028542 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.079236 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.079300 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.079321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.079347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.079370 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.182515 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.182580 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.182597 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.182622 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.182640 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.285037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.285104 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.285123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.285148 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.285168 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.389320 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.389400 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.389423 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.389448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.389467 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.492573 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.492624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.492637 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.492662 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.492675 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.594576 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.594632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.594651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.594674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.594717 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.697454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.697495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.697503 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.697517 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.697528 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.799252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.799305 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.799322 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.799344 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.799362 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.902336 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.902377 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.902387 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.902402 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:21 crc kubenswrapper[4903]: I1126 22:22:21.902412 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:21Z","lastTransitionTime":"2025-11-26T22:22:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.005283 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.005320 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.005330 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.005343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.005352 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.042495 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:22 crc kubenswrapper[4903]: E1126 22:22:22.042661 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.063777 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.073415 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.085896 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.100607 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.112113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.112163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.112174 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.112189 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.112198 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.115804 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.131499 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.143325 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.157295 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.170849 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.187067 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.203857 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.214796 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.214825 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.214837 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.214851 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.214863 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.219193 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.231144 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.246932 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.263252 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.281794 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.296871 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.318007 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.318056 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.318069 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.318091 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.318107 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.420356 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.420395 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.420409 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.420424 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.420433 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.520302 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/0.log" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.520348 4903 generic.go:334] "Generic (PLEG): container finished" podID="229974d7-7b78-434b-a346-8b9004e69bf2" containerID="8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df" exitCode=1 Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.520375 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerDied","Data":"8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.520706 4903 scope.go:117] "RemoveContainer" containerID="8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.522729 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.522777 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.522795 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.522817 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.522833 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.537888 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.554479 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.574124 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.589712 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.606798 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.624552 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.625506 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.625767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.625950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.626083 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.626248 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.645353 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.659630 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.678072 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.689117 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.703810 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.716675 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.728580 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.728623 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.728640 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.728662 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.728679 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.735030 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.750103 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.765528 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.778784 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.793444 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:22Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.830866 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.830894 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.830902 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.830914 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.830930 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.932406 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.932457 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.932470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.932487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:22 crc kubenswrapper[4903]: I1126 22:22:22.932499 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:22Z","lastTransitionTime":"2025-11-26T22:22:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.028342 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.028360 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.028456 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:23 crc kubenswrapper[4903]: E1126 22:22:23.028509 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:23 crc kubenswrapper[4903]: E1126 22:22:23.028635 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:23 crc kubenswrapper[4903]: E1126 22:22:23.028685 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.038429 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.038456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.038465 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.038477 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.038486 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.141550 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.141589 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.141599 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.141617 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.141628 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.243919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.243968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.243980 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.243997 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.244009 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.347879 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.347927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.347936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.347953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.347964 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.458881 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.458916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.458925 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.458940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.458949 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.525801 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/0.log" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.525846 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerStarted","Data":"969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.545659 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.561021 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.561051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.561060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.561073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.561082 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.566272 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.578542 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.597756 4903 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.611239 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.624373 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.637183 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.651022 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.662120 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.664056 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.664100 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.664114 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.664132 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.664145 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.711103 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.727452 4903 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb2950603
6529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.744932 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.765302 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.767433 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.767487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc 
kubenswrapper[4903]: I1126 22:22:23.767499 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.767516 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.767528 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.790501 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4
880205766836e53652d5005d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.804802 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.820080 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.836449 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:23Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.870038 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.870099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.870111 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.870130 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.870143 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.973008 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.973054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.973067 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.973085 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:23 crc kubenswrapper[4903]: I1126 22:22:23.973098 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:23Z","lastTransitionTime":"2025-11-26T22:22:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.027568 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:24 crc kubenswrapper[4903]: E1126 22:22:24.027718 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.075510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.075580 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.075603 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.075624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.075637 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.178351 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.178395 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.178406 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.178422 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.178434 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.281115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.281169 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.281180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.281196 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.281207 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.383385 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.383431 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.383443 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.383459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.383471 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.485487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.485525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.485534 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.485548 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.485558 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.588028 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.588114 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.588128 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.588147 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.588160 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.690311 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.690352 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.690366 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.690382 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.690395 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.793206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.793239 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.793248 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.793262 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.793271 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.895885 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.895929 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.895940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.895956 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.895968 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.999121 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.999176 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.999195 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.999223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:24 crc kubenswrapper[4903]: I1126 22:22:24.999245 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:24Z","lastTransitionTime":"2025-11-26T22:22:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.028091 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.028151 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.028207 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:25 crc kubenswrapper[4903]: E1126 22:22:25.028360 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:25 crc kubenswrapper[4903]: E1126 22:22:25.029007 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:25 crc kubenswrapper[4903]: E1126 22:22:25.028888 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.105429 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.105482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.105500 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.105521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.105588 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.209566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.210048 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.210068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.210093 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.210110 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.312965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.313021 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.313034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.313052 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.313064 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.415466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.415511 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.415529 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.415553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.415570 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.518097 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.518163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.518180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.518209 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.518229 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.622233 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.622296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.622314 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.622339 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.622356 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.725725 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.725788 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.725807 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.725831 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.725849 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.829055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.829137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.829155 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.829179 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.829195 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.932289 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.932355 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.932378 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.932403 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:25 crc kubenswrapper[4903]: I1126 22:22:25.932423 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:25Z","lastTransitionTime":"2025-11-26T22:22:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.028328 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:26 crc kubenswrapper[4903]: E1126 22:22:26.028513 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.036227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.036289 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.036308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.036330 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.036347 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.138943 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.139020 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.139045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.139075 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.139094 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.241888 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.241938 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.241955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.241983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.242010 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.344630 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.344717 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.344741 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.344770 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.344788 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.447955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.448020 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.448037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.448063 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.448082 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.550822 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.550906 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.550928 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.550961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.550990 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.653621 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.653684 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.653755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.653786 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.653806 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.756521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.756581 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.756598 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.756622 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.756640 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.859376 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.859436 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.859453 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.859476 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.859493 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.962903 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.962984 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.963001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.963035 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:26 crc kubenswrapper[4903]: I1126 22:22:26.963053 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:26Z","lastTransitionTime":"2025-11-26T22:22:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.027625 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.027771 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:27 crc kubenswrapper[4903]: E1126 22:22:27.027825 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.027635 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:27 crc kubenswrapper[4903]: E1126 22:22:27.027972 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:27 crc kubenswrapper[4903]: E1126 22:22:27.028140 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.066133 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.066188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.066205 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.066230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.066247 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.169311 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.169375 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.169393 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.169426 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.169444 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.272844 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.272913 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.272932 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.272959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.272986 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.375942 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.376025 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.376045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.376071 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.376091 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.478928 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.478974 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.478990 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.479013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.479029 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.581598 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.581648 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.581665 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.581686 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.581730 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.685732 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.685819 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.685837 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.685862 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.685886 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.789215 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.789278 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.789296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.789323 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.789352 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.892975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.893119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.893214 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.893245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.893263 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.997053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.997109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.997128 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.997153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:27 crc kubenswrapper[4903]: I1126 22:22:27.997170 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:27Z","lastTransitionTime":"2025-11-26T22:22:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.028277 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:28 crc kubenswrapper[4903]: E1126 22:22:28.028457 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.101154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.101206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.101223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.101245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.101262 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.205265 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.205311 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.205328 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.205350 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.205366 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.307795 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.307844 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.307861 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.307884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.307903 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.410785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.410836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.410853 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.410872 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.410883 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.513392 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.513448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.513466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.513488 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.513505 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.616247 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.616352 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.616372 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.616406 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.616428 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.719645 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.719769 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.719802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.719834 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.719855 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.822860 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.822929 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.822945 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.822969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.822986 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.925927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.926012 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.926036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.926068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:28 crc kubenswrapper[4903]: I1126 22:22:28.926092 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:28Z","lastTransitionTime":"2025-11-26T22:22:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.027405 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.027527 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:29 crc kubenswrapper[4903]: E1126 22:22:29.027835 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.027893 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:29 crc kubenswrapper[4903]: E1126 22:22:29.028009 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:29 crc kubenswrapper[4903]: E1126 22:22:29.028225 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.028661 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.028713 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.028728 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.028746 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.028772 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.131451 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.131508 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.131520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.131538 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.131550 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.233800 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.233845 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.233857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.233874 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.233886 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.336520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.336570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.336587 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.336610 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.336629 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.440051 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.440103 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.440114 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.440132 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.440146 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.542953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.543018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.543039 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.543079 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.543102 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.646133 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.646175 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.646192 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.646215 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.646233 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.749566 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.749621 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.749642 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.749671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.749751 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.852571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.852666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.852685 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.852737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.852754 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.954966 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.955022 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.955041 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.955070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:29 crc kubenswrapper[4903]: I1126 22:22:29.955103 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:29Z","lastTransitionTime":"2025-11-26T22:22:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.045811 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.046049 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.046954 4903 scope.go:117] "RemoveContainer" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.058133 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.058202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.058244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.058273 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.058294 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.161558 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.161598 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.161607 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.161624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.161636 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.264466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.264527 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.264546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.264570 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.264588 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.368031 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.368099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.368117 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.368146 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.368164 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.447856 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.447923 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.447941 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.447968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.447987 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.471868 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.478643 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.478696 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.478735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.478758 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.478775 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.499647 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.505016 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.505070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.505090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.505116 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.505135 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.530973 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.535935 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.536003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.536026 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.536054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.536078 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.552585 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/2.log" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.557029 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.557955 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.570710 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.575622 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.575680 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.575738 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.575766 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.575812 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.592574 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.597603 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: E1126 22:22:30.597771 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.599301 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.599354 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.599372 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.599400 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.599418 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.612700 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.632447 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.661309 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.674193 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.688962 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701313 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701824 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701865 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701881 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701899 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.701914 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.716400 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri
-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.733980 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in 
/host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.748989 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.764539 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.778200 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.791277 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.802891 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.804937 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.804971 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.804983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.805010 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.805024 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.824324 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.836539 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.851848 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:30Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.907739 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.907808 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.907827 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.907853 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:30 crc kubenswrapper[4903]: I1126 22:22:30.907871 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:30Z","lastTransitionTime":"2025-11-26T22:22:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.010067 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.010131 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.010149 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.010175 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.010193 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.027386 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.027461 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.027467 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:31 crc kubenswrapper[4903]: E1126 22:22:31.027528 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:31 crc kubenswrapper[4903]: E1126 22:22:31.027615 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:31 crc kubenswrapper[4903]: E1126 22:22:31.027790 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.112788 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.112831 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.112842 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.112860 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.112873 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.216183 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.216238 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.216257 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.216287 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.216311 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.319286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.319347 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.319364 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.319388 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.319407 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.422111 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.422190 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.422216 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.422245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.422267 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.525775 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.525821 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.525837 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.525859 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.525877 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.564657 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/3.log" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.565609 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/2.log" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.569855 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" exitCode=1 Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.569907 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.569953 4903 scope.go:117] "RemoveContainer" containerID="4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.570939 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" Nov 26 22:22:31 crc kubenswrapper[4903]: E1126 22:22:31.571189 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.590265 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.611967 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.628223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.628287 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.628308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.628334 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.628356 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.631021 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.651306 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.669006 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.685220 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.700900 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.715512 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.730993 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.731032 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.731048 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.731074 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.731090 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.731259 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.756367 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.776271 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.790193 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.809804 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.832972 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:31Z\\\",\\\"message\\\":\\\" 22:22:31.088028 6909 handler.go:190] Sending *v1.Pod 
event handler 3 for removal\\\\nI1126 22:22:31.088033 6909 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 22:22:31.088043 6909 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:22:31.088047 6909 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:22:31.088038 6909 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:22:31.088064 6909 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:22:31.088070 6909 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 22:22:31.088073 6909 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:22:31.088088 6909 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:22:31.088101 6909 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 22:22:31.088109 6909 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:22:31.088117 6909 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:22:31.088114 6909 factory.go:656] Stopping watch factory\\\\nI1126 22:22:31.088120 6909 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:22:31.088127 6909 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:22:31.088133 6909 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.833400 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.833457 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.833476 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.833503 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.833522 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.845638 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.860367 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.877873 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:31Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.936787 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.936871 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.936890 4903 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.936913 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:31 crc kubenswrapper[4903]: I1126 22:22:31.936929 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:31Z","lastTransitionTime":"2025-11-26T22:22:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.028748 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:32 crc kubenswrapper[4903]: E1126 22:22:32.028941 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.039535 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.039593 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.039610 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.039632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.039651 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.048000 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.066985 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.085812 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.101188 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.119198 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.135905 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-oper
ator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.142320 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.142383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.142402 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.142427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.142444 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.158403 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.173296 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.191483 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.209064 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.230814 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.245390 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.245470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc 
kubenswrapper[4903]: I1126 22:22:32.245495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.245525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.245547 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.255862 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284
237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f8766f396e0cb9ddd96b27b6d8fc7527c9941d4880205766836e53652d5005d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:00Z\\\",\\\"message\\\":\\\"[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/olm-operator-metrics\\\\\\\"}\\\\nI1126 22:21:59.996094 6549 services_controller.go:360] Finished syncing service olm-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 1.571654ms\\\\nI1126 22:21:59.996100 6549 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1126 22:21:59.996165 6549 factory.go:1336] Added *v1.Node event handler 7\\\\nI1126 22:21:59.996226 6549 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1126 22:21:59.996567 6549 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 22:21:59.996607 6549 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"default/kubernetes\\\\\\\"}\\\\nI1126 22:21:59.996630 6549 services_controller.go:360] Finished syncing service kubernetes on namespace default for network=default : 1.690146ms\\\\nI1126 22:21:59.996675 6549 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 22:21:59.996753 6549 ovnkube.go:599] Stopped ovnkube\\\\nI1126 22:21:59.996794 6549 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 22:21:59.996872 6549 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:59Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:31Z\\\",\\\"message\\\":\\\" 22:22:31.088028 6909 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1126 22:22:31.088033 6909 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 22:22:31.088043 6909 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:22:31.088047 6909 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:22:31.088038 6909 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:22:31.088064 6909 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:22:31.088070 6909 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 22:22:31.088073 6909 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:22:31.088088 6909 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:22:31.088101 6909 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 22:22:31.088109 6909 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:22:31.088117 6909 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:22:31.088114 6909 
factory.go:656] Stopping watch factory\\\\nI1126 22:22:31.088120 6909 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:22:31.088127 6909 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:22:31.088133 6909 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\
\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.270970 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.290058 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.304669 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.318994 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.333890 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.348622 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.348675 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.348692 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.348711 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.348746 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.452269 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.452331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.452354 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.452383 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.452405 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.554987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.555032 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.555045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.555061 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.555071 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.575376 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/3.log" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.579937 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" Nov 26 22:22:32 crc kubenswrapper[4903]: E1126 22:22:32.580163 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.599820 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.615628 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.639389 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.659000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.659069 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.659090 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.659118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.659139 4903 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.660361 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.675733 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.691663 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.713033 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:31Z\\\",\\\"message\\\":\\\" 22:22:31.088028 6909 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1126 22:22:31.088033 6909 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 22:22:31.088043 6909 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:22:31.088047 6909 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:22:31.088038 6909 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:22:31.088064 6909 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:22:31.088070 6909 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 22:22:31.088073 6909 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:22:31.088088 6909 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:22:31.088101 6909 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 22:22:31.088109 6909 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:22:31.088117 6909 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:22:31.088114 6909 factory.go:656] Stopping watch factory\\\\nI1126 22:22:31.088120 6909 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:22:31.088127 6909 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:22:31.088133 6909 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.724471 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.740176 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.755543 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.761383 4903 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.761460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.761478 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.761498 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.761513 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.776288 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.795702 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z"
Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.814634 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z"
Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.832603 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z"
Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.844609 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z"
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.855466 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.863460 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.863510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.863575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.863609 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.863633 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.868286 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:32Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.966227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.966288 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.966307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.966331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:32 crc kubenswrapper[4903]: I1126 22:22:32.966350 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:32Z","lastTransitionTime":"2025-11-26T22:22:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.028412 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.028505 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.028433 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:33 crc kubenswrapper[4903]: E1126 22:22:33.028633 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:33 crc kubenswrapper[4903]: E1126 22:22:33.028832 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:33 crc kubenswrapper[4903]: E1126 22:22:33.029029 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.069737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.069797 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.069815 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.069838 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.069856 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.173144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.173217 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.173241 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.173273 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.173298 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.275604 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.275657 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.275673 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.275701 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.275751 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.379083 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.379151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.379192 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.379223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.379244 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.483358 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.483419 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.483430 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.483450 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.483463 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.586254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.586307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.586324 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.586346 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.586362 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.690073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.690122 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.690134 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.690155 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.690165 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.793467 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.793600 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.793763 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.793817 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.793841 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.897820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.897886 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.897903 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.897927 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:33 crc kubenswrapper[4903]: I1126 22:22:33.897947 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:33Z","lastTransitionTime":"2025-11-26T22:22:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.000191 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.000247 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.000263 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.000287 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.000302 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.028541 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:34 crc kubenswrapper[4903]: E1126 22:22:34.028682 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.102767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.102811 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.102821 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.102836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.102846 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.205282 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.205350 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.205373 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.205401 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.205425 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.307911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.307979 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.308004 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.308034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.308056 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.411479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.411520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.411530 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.411546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.411557 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.514598 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.514978 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.515119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.515263 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.515402 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.618994 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.619054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.619070 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.619094 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.619112 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.720950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.720988 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.721005 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.721027 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.721045 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.823124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.823172 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.823186 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.823202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.823213 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.925921 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.925996 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.926013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.926037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:34 crc kubenswrapper[4903]: I1126 22:22:34.926055 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:34Z","lastTransitionTime":"2025-11-26T22:22:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.027332 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.027415 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:35 crc kubenswrapper[4903]: E1126 22:22:35.027617 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.027673 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:35 crc kubenswrapper[4903]: E1126 22:22:35.027758 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:35 crc kubenswrapper[4903]: E1126 22:22:35.027808 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.028852 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.028915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.028939 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.028965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.028986 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.132227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.132301 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.132321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.132353 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.132370 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.236448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.236508 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.236525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.236546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.236563 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.339291 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.339350 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.339367 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.339391 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.339407 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.441765 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.441829 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.441847 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.441870 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.441888 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.544527 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.544578 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.544593 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.544618 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.544636 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.648033 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.648071 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.648081 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.648096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.648107 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.751332 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.751396 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.751418 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.751448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.751471 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.853953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.854006 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.854022 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.854044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.854060 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.957110 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.957145 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.957154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.957168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:35 crc kubenswrapper[4903]: I1126 22:22:35.957177 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.027960 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.028150 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
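[Editor's note: the condition blob in the "Node became not ready" entries above is plain JSON with the shape of a Kubernetes NodeCondition. A minimal Go sketch for decoding it while sifting a capture like this one; the struct below is a local stand-in defined only to keep the snippet self-contained, not the k8s.io/api type.]

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the condition payload logged by setters.go:603 above;
// field names match the JSON keys exactly as they appear in the log.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Condition copied verbatim from one of the entries above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:35Z","lastTransitionTime":"2025-11-26T22:22:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}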
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.060118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.060170 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.060188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.060211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.060227 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.162893 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.162961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.162987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.163015 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.163036 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.265629 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.265682 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.265739 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.265797 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.265820 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.368487 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.368533 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.368545 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.368562 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.368574 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.472313 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.472374 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.472391 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.472416 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.472433 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.575030 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.575100 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.575117 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.575140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.575160 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.677983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.678046 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.678068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.678097 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.678121 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:36Z","lastTransitionTime":"2025-11-26T22:22:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.718913 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.719107 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.719073745 +0000 UTC m=+149.409308655 (durationBeforeRetry 1m4s). 
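[Editor's note: every failed mount or unmount above is parked with "No retries permitted until ... (durationBeforeRetry 1m4s)": the kubelet's volume manager backs off exponentially, and 1m4s is the 64-second step of a doubling series. A rough Go sketch of that pattern under assumed constants (500 ms initial delay, 2 m cap); the kubelet's exact parameters may differ.]

package main

import (
	"fmt"
	"time"
)

// Doubling backoff with a cap: the general shape behind the
// durationBeforeRetry values in the log. Constants are illustrative.
func backoffSchedule(initial, max time.Duration, steps int) []time.Duration {
	out := make([]time.Duration, 0, steps)
	d := initial
	for i := 0; i < steps; i++ {
		out = append(out, d)
		if d *= 2; d > max {
			d = max
		}
	}
	return out
}

func main() {
	// The eighth step prints "retry 8: wait 1m4s", matching the log above.
	for i, d := range backoffSchedule(500*time.Millisecond, 2*time.Minute, 9) {
		fmt.Printf("retry %d: wait %v\n", i+1, d)
	}
}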
Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.719177 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.719391 4903 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.719510 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.719481936 +0000 UTC m=+149.409716876 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
[Heartbeat cycle repeats once at 22:22:36.781.]
Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.820496 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.820843 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:36 crc kubenswrapper[4903]: I1126 22:22:36.820958 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.820716 4903 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821016 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821186 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.82116891 +0000 UTC m=+149.511403820 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821198 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821332 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821347 4903 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821405 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.821385846 +0000 UTC m=+149.511620756 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821289 4903 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821428 4903 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 22:22:36 crc kubenswrapper[4903]: E1126 22:22:36.821463 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.821454068 +0000 UTC m=+149.511689088 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
[Heartbeat cycle repeats at 22:22:36.884 and 22:22:36.986.]
Nov 26 22:22:37 crc kubenswrapper[4903]: I1126 22:22:37.027917 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:37 crc kubenswrapper[4903]: I1126 22:22:37.027917 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:37 crc kubenswrapper[4903]: I1126 22:22:37.027928 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:37 crc kubenswrapper[4903]: E1126 22:22:37.028373 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:37 crc kubenswrapper[4903]: E1126 22:22:37.028133 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:37 crc kubenswrapper[4903]: E1126 22:22:37.028470 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
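[Editor's note: each of these sync failures has the same root cause, visible in every message: the CRI reports NetworkReady=false because no CNI network config exists yet under /etc/kubernetes/cni/net.d/. A hedged Go sketch of that directory check; the extensions follow common CNI loader conventions and stand in for, rather than reproduce, CRI-O's actual logic.]

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Checks whether any CNI network configuration file is present in the
// given directory, approximating the condition behind "no CNI
// configuration file in /etc/kubernetes/cni/net.d/" above.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	fmt.Println("CNI configuration present:", ok)
}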
[The heartbeat cycle continues at roughly 100 ms intervals from 22:22:37.089 through 22:22:38.016; ten repetitions omitted.]
Nov 26 22:22:38 crc kubenswrapper[4903]: I1126 22:22:38.028006 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:38 crc kubenswrapper[4903]: E1126 22:22:38.028176 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[The heartbeat cycle continues at roughly 100 ms intervals from 22:22:38.118 through 22:22:38.943; nine repetitions omitted.]
Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.027507 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.027637 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.027763 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:39 crc kubenswrapper[4903]: E1126 22:22:39.027820 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:39 crc kubenswrapper[4903]: E1126 22:22:39.027900 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:39 crc kubenswrapper[4903]: E1126 22:22:39.028090 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
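[Editor's note: when sifting a capture like this, it helps to split each kubenswrapper entry's klog header (severity and date, time, pid, source file:line) from its message. A small Go parser written against the lines shown here; the regexp is tuned to this log rather than being a general klog grammar.]

package main

import (
	"fmt"
	"regexp"
)

// klog header shape used by every kubenswrapper entry above:
// <severity><MMDD> <HH:MM:SS.micros> <pid> <file>:<line>] <message>
var klogRe = regexp.MustCompile(`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w.]+:\d+)\] (.*)`)

func main() {
	// Entry copied verbatim from the log above.
	line := `I1126 22:22:38.028006 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"`
	m := klogRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmsg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6])
}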
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.047143 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.047206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.047224 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.047248 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.047265 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.149730 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.149780 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.149795 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.149813 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.149826 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.252842 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.252905 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.252922 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.252947 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.252966 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.355316 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.355405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.355424 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.355479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.355499 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.459056 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.459139 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.459158 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.459681 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.459774 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.562865 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.562975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.562997 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.563026 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.563048 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.666067 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.666124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.666140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.666162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.666182 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.769066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.769123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.769139 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.769162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.769179 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.872118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.872164 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.872179 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.872202 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.872218 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.975355 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.975405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.975421 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.975447 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:39 crc kubenswrapper[4903]: I1126 22:22:39.975465 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:39Z","lastTransitionTime":"2025-11-26T22:22:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.028399 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.028582 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.079481 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.079530 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.079547 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.079574 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.079591 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.182759 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.182820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.182836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.182862 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.182879 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.285604 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.285670 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.285688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.285752 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.285773 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.388218 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.388282 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.388299 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.388323 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.388340 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.490820 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.490926 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.490948 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.490972 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.490989 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.593801 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.593871 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.593889 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.593913 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.593934 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.697093 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.697128 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.697138 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.697154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.697164 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.723108 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.723214 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.723233 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.723257 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.723277 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.740428 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.745144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.745233 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.745245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.745263 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.745275 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.760716 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.765404 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.765459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.765476 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.765497 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.765514 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.779991 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.784780 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.784839 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.784857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.784887 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.784905 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.801314 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.808799 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.808864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.808882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.808904 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.808924 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.823619 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:40Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:40 crc kubenswrapper[4903]: E1126 22:22:40.823817 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.825757 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
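Every "Error updating node status, will retry" record above (the duplicate retry payloads are elided here) fails for the same reason: the serving certificate of the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, so the kubelet's TLS handshake is rejected before any patch is applied. The following is a minimal Go sketch of the same validity check; only the endpoint address and the expiry date are taken from the log, and the program itself is illustrative rather than part of the kubelet:

    // certcheck.go: report the validity window of a TLS endpoint's serving
    // certificate, mirroring the x509 check that fails in the records above.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        addr := "127.0.0.1:9743" // network-node-identity webhook endpoint, from the log

        // InsecureSkipVerify lets us read the certificate even though normal
        // verification would reject it as expired; this is inspection, not trust.
        conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatalf("dial %s: %v", addr, err)
        }
        defer conn.Close()

        cert := conn.ConnectionState().PeerCertificates[0]
        now := time.Now().UTC()
        fmt.Printf("subject:   %s\n", cert.Subject)
        fmt.Printf("notBefore: %s\n", cert.NotBefore.UTC().Format(time.RFC3339))
        fmt.Printf("notAfter:  %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
        if now.After(cert.NotAfter) {
            // Same shape as the kubelet error: "current time X is after Y".
            fmt.Printf("expired: current time %s is after %s\n",
                now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        }
    }

Run against the endpoint in this log, it would presumably print notAfter 2025-08-24T17:21:41Z followed by the "expired" line, matching the webhook failure message verbatim.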
event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.825792 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.825805 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.825825 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.825838 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.928832 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.928905 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.928926 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.928955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:40 crc kubenswrapper[4903]: I1126 22:22:40.928973 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:40Z","lastTransitionTime":"2025-11-26T22:22:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.028377 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.028458 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.028469 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:41 crc kubenswrapper[4903]: E1126 22:22:41.028652 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:41 crc kubenswrapper[4903]: E1126 22:22:41.028878 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:41 crc kubenswrapper[4903]: E1126 22:22:41.029085 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.031559 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.031616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.031632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.031655 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.031673 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.043976 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.134804 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.134867 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.134886 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.134909 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.134930 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.238000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.238059 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.238078 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.238103 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.238120 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.341675 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.341752 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.341768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.341791 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.341807 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.454299 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.454359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.454381 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.454412 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.454431 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.557681 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.558133 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.558342 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.558539 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.558771 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.661616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.661683 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.661720 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.661740 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.661754 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.764762 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.764817 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.764833 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.764855 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.764871 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.868326 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.868386 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.868404 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.868428 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.868446 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.971044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.971113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.971136 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.971166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:41 crc kubenswrapper[4903]: I1126 22:22:41.971185 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:41Z","lastTransitionTime":"2025-11-26T22:22:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.028325 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:42 crc kubenswrapper[4903]: E1126 22:22:42.028518 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.050730 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.071003 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.074972 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.075044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.075068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.075100 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.075122 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.085848 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.116614 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:31Z\\\",\\\"message\\\":\\\" 22:22:31.088028 6909 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1126 22:22:31.088033 6909 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 22:22:31.088043 6909 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:22:31.088047 6909 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:22:31.088038 6909 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:22:31.088064 6909 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:22:31.088070 6909 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 22:22:31.088073 6909 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:22:31.088088 6909 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:22:31.088101 6909 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 22:22:31.088109 6909 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:22:31.088117 6909 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:22:31.088114 6909 factory.go:656] Stopping watch factory\\\\nI1126 22:22:31.088120 6909 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:22:31.088127 6909 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:22:31.088133 6909 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.132986 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.150978 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7ab8c5eb-fded-446c-8645-4e529cc12a2e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c79be23ae25bb1672628bae5152c8ba607b3afaa8eaefa0060cbf0480a673fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"1
92.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.173325 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.177024 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.177087 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.177110 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.177139 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.177160 4903 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.193440 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.215028 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 
2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.233548 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.250975 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.267581 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.280370 4903 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.280425 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.280442 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.280466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.280482 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.286524 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.305043 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.327328 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.342515 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.358234 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.373097 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:42Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.383012 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.383047 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.383055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.383068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.383077 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.486911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.486980 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.487002 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.487035 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.487058 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.592449 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.592477 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.592485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.592499 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.592533 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.695625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.695726 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.695772 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.695802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.695820 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.800044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.800107 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.800125 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.800154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.800171 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.904186 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.904240 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.904259 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.904284 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:42 crc kubenswrapper[4903]: I1126 22:22:42.904303 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:42Z","lastTransitionTime":"2025-11-26T22:22:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.007541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.007608 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.007625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.007651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.007668 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.028030 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:43 crc kubenswrapper[4903]: E1126 22:22:43.028161 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.028249 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.028274 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:43 crc kubenswrapper[4903]: E1126 22:22:43.028577 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:43 crc kubenswrapper[4903]: E1126 22:22:43.028748 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.111629 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.111740 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.111764 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.111794 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.111817 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.214966 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.215012 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.215022 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.215037 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.215049 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.318012 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.318055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.318068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.318085 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.318099 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.422380 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.422472 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.422489 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.422544 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.422564 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.525821 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.525899 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.525918 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.525944 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.525962 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.628859 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.628909 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.628926 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.628953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.628971 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.732006 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.732062 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.732080 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.732103 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.732121 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.835395 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.835625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.835650 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.835681 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.835800 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.939307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.939424 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.939448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.939486 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:43 crc kubenswrapper[4903]: I1126 22:22:43.939515 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:43Z","lastTransitionTime":"2025-11-26T22:22:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.028508 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:44 crc kubenswrapper[4903]: E1126 22:22:44.028785 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.029965 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" Nov 26 22:22:44 crc kubenswrapper[4903]: E1126 22:22:44.030502 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.043264 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.043314 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.043334 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.043359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.043383 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.146616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.146743 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.146765 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.146801 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.146823 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.250546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.250608 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.250625 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.250652 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.250670 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.354346 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.354418 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.354437 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.354466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.354484 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.458376 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.458440 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.458463 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.458493 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.458515 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.564036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.564107 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.564131 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.564163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.564187 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.667653 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.667763 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.667784 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.667814 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.667842 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.771470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.771839 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.771866 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.772391 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.772425 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.876249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.876297 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.876314 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.876337 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.876356 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.979138 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.979206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.979225 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.979249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:44 crc kubenswrapper[4903]: I1126 22:22:44.979266 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:44Z","lastTransitionTime":"2025-11-26T22:22:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.027430 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.027496 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.027530 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:45 crc kubenswrapper[4903]: E1126 22:22:45.027626 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:45 crc kubenswrapper[4903]: E1126 22:22:45.027811 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:45 crc kubenswrapper[4903]: E1126 22:22:45.027907 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.081921 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.081980 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.081997 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.082023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.082047 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.184246 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.184358 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.184432 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.184462 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.184536 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.288930 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.288998 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.289015 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.289040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.289057 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.393045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.393109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.393126 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.393166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.393184 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.495975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.496036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.496053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.496076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.496093 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.598813 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.598862 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.598878 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.598900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.598918 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.701270 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.701330 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.701353 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.701380 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.701399 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.804421 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.804584 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.804619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.804654 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:45 crc kubenswrapper[4903]: I1126 22:22:45.804677 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:45Z","lastTransitionTime":"2025-11-26T22:22:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:46 crc kubenswrapper[4903]: I1126 22:22:46.029023 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:46 crc kubenswrapper[4903]: E1126 22:22:46.029221 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:22:47 crc kubenswrapper[4903]: I1126 22:22:47.028458 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:47 crc kubenswrapper[4903]: I1126 22:22:47.028505 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:47 crc kubenswrapper[4903]: E1126 22:22:47.029086 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:47 crc kubenswrapper[4903]: I1126 22:22:47.028535 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:47 crc kubenswrapper[4903]: E1126 22:22:47.029215 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:47 crc kubenswrapper[4903]: E1126 22:22:47.029470 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:48 crc kubenswrapper[4903]: I1126 22:22:48.032065 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:48 crc kubenswrapper[4903]: E1126 22:22:48.032239 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:22:49 crc kubenswrapper[4903]: I1126 22:22:49.027441 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:49 crc kubenswrapper[4903]: I1126 22:22:49.027529 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:49 crc kubenswrapper[4903]: E1126 22:22:49.027594 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:49 crc kubenswrapper[4903]: E1126 22:22:49.027759 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:49 crc kubenswrapper[4903]: I1126 22:22:49.027538 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:49 crc kubenswrapper[4903]: E1126 22:22:49.027930 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.027796 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:50 crc kubenswrapper[4903]: E1126 22:22:50.027983 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Has your network provider started?"} Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.865811 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.865864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.865882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.865905 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.865922 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:50Z","lastTransitionTime":"2025-11-26T22:22:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.968328 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.968406 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.968428 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.968462 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:50 crc kubenswrapper[4903]: I1126 22:22:50.968484 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:50Z","lastTransitionTime":"2025-11-26T22:22:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.024934 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.025002 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.025021 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.025044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.025060 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.027336 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.027444 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.027498 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.027520 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.027638 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.027763 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.047443 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:51Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.055610 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.055672 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.055729 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.055757 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.055776 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.080021 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:51Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.084571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.084611 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.084624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.084639 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.084651 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.103948 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:51Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.108308 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.108349 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.108363 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.108380 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.108392 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.121976 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:51Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.126491 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.126530 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.126542 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.126560 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.126570 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.139769 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:51Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:51 crc kubenswrapper[4903]: E1126 22:22:51.139995 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.142135 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.142183 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.142200 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.142223 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.142241 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.244826 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.244884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.244900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.244924 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.244942 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.348419 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.348482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.348500 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.348525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.348542 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.452089 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.452144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.452163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.452189 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.452214 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.555065 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.555186 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.555206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.555248 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.555265 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.697913 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.697953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.697967 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.697983 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.697993 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.800135 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.800212 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.800230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.800254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.800271 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.903491 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.903554 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.903575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.903600 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:51 crc kubenswrapper[4903]: I1126 22:22:51.903618 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:51Z","lastTransitionTime":"2025-11-26T22:22:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.006057 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.006115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.006132 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.006155 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.006174 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.028064 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:52 crc kubenswrapper[4903]: E1126 22:22:52.028204 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.047194 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4943d6ca-5152-4ac1-a9d3-850d5a5063b7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aa4d6694eef4e4b5bb0c06f961f4d9ad0670b58a9a450e98072559ed3ef57397\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bcf5e0f1249ba32f9d0a5676878504837631401a15ab28b9dc2e22f612609563\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"reason
\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b98d3e7fa05503559642c1e7071c166a4c431f660e4cc8f30cac7dfaf305e35\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://54da76782022b27da3ba0ef2512ba19b9a6b53ac2aca4b848e5e54070834db4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abd
f5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fc907f5c7235ede5c3f8562816f1f995856d66d08bc45743202d7eaea834f94\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6cf957f0483f549127052b5148675b3e666d93d9d809d23a92a2530de8e470c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b332dd66b04388ed388f615c4316bb58e4cedb110cb82fbb3819eb29660baa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zt8rx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Run
ning\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-2z7vf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.050268 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.079057 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284
237a1fdbfa1d6e7d4190d06f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:31Z\\\",\\\"message\\\":\\\" 22:22:31.088028 6909 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1126 22:22:31.088033 6909 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 22:22:31.088043 6909 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 22:22:31.088047 6909 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 22:22:31.088038 6909 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 22:22:31.088064 6909 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 22:22:31.088070 6909 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 22:22:31.088073 6909 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 22:22:31.088088 6909 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 22:22:31.088101 6909 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 22:22:31.088109 6909 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 22:22:31.088117 6909 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 22:22:31.088114 6909 factory.go:656] Stopping watch factory\\\\nI1126 22:22:31.088120 6909 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 22:22:31.088127 6909 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 22:22:31.088133 6909 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:22:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8mpcc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-bbznt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.094948 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-dlvd4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"701c1e82-a66e-40d9-884e-2d59449edccc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6004b16855be03a239d2bba05b7e17197f774b9a619b0bf825247012b1664427\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fnc94\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-dlvd4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.109448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.109501 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.109519 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.109544 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.109560 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.111893 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7ab8c5eb-fded-446c-8645-4e529cc12a2e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c79be23ae25bb1672628bae5152c8ba607b3afaa8eaefa0060cbf0480a673fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b1a0fb986ff4e8a7b0a593d07e4d3db62972a7ff1369a2d65a9ec2a2a3660dbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.131404 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88d7e8bd8de8dfea29fb16e72fcd9951686df22b3fb1ad68d8f42201051fac7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.160207 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://effdc360fbca6cb42bc04cbdaa72ce975b877cc7a62e46e357458e45b11cf454\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://24150b4c4a64c6ae779515e90427bbc1254c34bdcc1cef9f1aa16742f7c48439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.181072 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.199425 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:36Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6c5f779756f2cf7aa8739c8dd4c923cd243ef3160812de87b523c5b742433f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.213613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.213670 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.213688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.213735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.213753 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.217319 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"232b7aad-b4bd-495a-a411-0cfd48fa372c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b9bcdfbff4150e4e2dbc54aef788fb57f8d585dad4b01cb27880d39480e062e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rrnqs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-wjwph\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.236436 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bxnsh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"229974d7-7b78-434b-a346-8b9004e69bf2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T22:22:22Z\\\",\\\"message\\\":\\\"2025-11-26T22:21:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd\\\\n2025-11-26T22:21:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_2b5e1290-d4c2-482e-9633-f17cd83ccefd to /host/opt/cni/bin/\\\\n2025-11-26T22:21:37Z [verbose] multus-daemon started\\\\n2025-11-26T22:21:37Z [verbose] Readiness Indicator file check\\\\n2025-11-26T22:22:22Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:22:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72nv6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:34Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bxnsh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.253014 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"18039c76-4e57-465f-9918-e618c823dff7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eecaea6ba41a970d1434000705f94cc7c998d76bc9b0721d5955e7bea7dde57a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8876ce44b1ced5ec6305e0a4645258247d129d9d4895fb51fa7bd91c990e64f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zjk2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-ft62j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 
22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.269855 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.289658 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.309104 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.315935 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.315996 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.316013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.316040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.316082 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.325266 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.345851 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7f049a52-ec02-4f6f-9856-00f50e8f0293\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 22:21:25.807190 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 22:21:25.809873 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1245934581/tls.crt::/tmp/serving-cert-1245934581/tls.key\\\\\\\"\\\\nI1126 22:21:32.310640 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 22:21:32.319816 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 22:21:32.319846 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 22:21:32.319876 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 22:21:32.319882 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 22:21:32.332209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 22:21:32.332247 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 22:21:32.332240 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 22:21:32.332283 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 22:21:32.332317 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 22:21:32.332322 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 22:21:32.332326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 22:21:32.332330 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 22:21:32.337043 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.364646 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dcc46b6d-7aec-4466-ba0b-46d06a935d0f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://575c57d478b4cf326bc870cb5432daf94d53c42bc6b1762cd4e03dd37a864676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a35d77eee10994dee7b84e75c9a44fb36958ba106362470013ae9c6dfc5145cb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://256b750bb0834d9a3174b0746c9caf1bf480db5f6eb29506036529fd04267bd5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.380753 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef28737-00fd-4738-ae1f-e02a5b974905\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2k42\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-q8dvw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:22:52Z is after 2025-08-24T17:21:41Z" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.420203 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.420252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.420266 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.420285 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.420298 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.523647 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.523735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.523749 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.523767 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.523778 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.625882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.625961 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.625986 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.626014 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.626038 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.729467 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.729518 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.729534 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.729557 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.729573 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.834656 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.834761 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.834781 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.834808 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.834830 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.911637 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:52 crc kubenswrapper[4903]: E1126 22:22:52.911891 4903 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:22:52 crc kubenswrapper[4903]: E1126 22:22:52.912001 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs podName:aef28737-00fd-4738-ae1f-e02a5b974905 nodeName:}" failed. No retries permitted until 2025-11-26 22:23:56.91196793 +0000 UTC m=+165.602202870 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs") pod "network-metrics-daemon-q8dvw" (UID: "aef28737-00fd-4738-ae1f-e02a5b974905") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.938021 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.938081 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.938098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.938120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:52 crc kubenswrapper[4903]: I1126 22:22:52.938140 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:52Z","lastTransitionTime":"2025-11-26T22:22:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.028436 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:53 crc kubenswrapper[4903]: E1126 22:22:53.028580 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.028662 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.028821 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:53 crc kubenswrapper[4903]: E1126 22:22:53.028970 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:53 crc kubenswrapper[4903]: E1126 22:22:53.029079 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.040860 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.040910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.040921 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.040939 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.040951 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.143900 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.143934 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.143943 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.143955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.143966 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.246658 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.246761 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.246785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.246813 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.246835 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.349451 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.349523 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.349546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.349574 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.349596 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.451640 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.451734 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.451751 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.451773 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.451790 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.554596 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.554657 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.554674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.554744 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.554762 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.657457 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.657526 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.657543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.657569 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.657587 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.760787 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.760839 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.760861 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.760890 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.760911 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.864017 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.864077 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.864096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.864120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.864137 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.966477 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.966543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.966558 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.966583 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:53 crc kubenswrapper[4903]: I1126 22:22:53.966601 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:53Z","lastTransitionTime":"2025-11-26T22:22:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.028427 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:54 crc kubenswrapper[4903]: E1126 22:22:54.028621 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.069204 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.069261 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.069277 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.069298 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.069339 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.172117 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.172193 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.172211 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.172234 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.172254 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.274876 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.274933 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.274952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.274975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.274991 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.378447 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.378500 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.378517 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.378538 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.378554 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.481137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.481188 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.481206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.481228 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.481246 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.585334 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.585503 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.585528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.585559 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.585581 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.687799 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.687867 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.687901 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.687919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.687933 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.791108 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.791168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.791186 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.791210 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.791227 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.893672 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.893784 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.893807 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.893836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.893855 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.997245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.997309 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.997333 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.997364 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:54 crc kubenswrapper[4903]: I1126 22:22:54.997386 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:54Z","lastTransitionTime":"2025-11-26T22:22:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.027970 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.028002 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.028083 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:22:55 crc kubenswrapper[4903]: E1126 22:22:55.028119 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:55 crc kubenswrapper[4903]: E1126 22:22:55.028270 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:55 crc kubenswrapper[4903]: E1126 22:22:55.028350 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.100189 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.100238 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.100255 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.100279 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.100295 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.202911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.203000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.203023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.203053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.203092 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.305214 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.305304 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.305350 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.305369 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.305380 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.408249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.408286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.408296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.408309 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.408320 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.511543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.511619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.511639 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.511663 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.511680 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.614615 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.614662 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.614680 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.614754 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.614772 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.717808 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.717857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.717869 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.717886 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.717901 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.820976 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.821036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.821054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.821078 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.821094 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.924496 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.924559 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.924575 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.924598 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:55 crc kubenswrapper[4903]: I1126 22:22:55.924619 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:55Z","lastTransitionTime":"2025-11-26T22:22:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030001 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030057 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030423 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030521 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.030726 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: E1126 22:22:56.032417 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.133994 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.134031 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.134039 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.134053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.134061 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.236100 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.236151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.236168 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.236189 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.236205 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.338936 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.338973 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.338985 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.339002 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.339013 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.442405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.442437 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.442449 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.442466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.442478 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.545459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.545519 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.545543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.545573 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.545598 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.648407 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.648479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.648501 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.648529 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.648550 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.750831 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.750884 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.750898 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.750916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.750927 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.853786 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.853844 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.853856 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.853874 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.853886 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.956403 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.956454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.956465 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.956485 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:56 crc kubenswrapper[4903]: I1126 22:22:56.956497 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:56Z","lastTransitionTime":"2025-11-26T22:22:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.027926 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.028015 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.027926 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:57 crc kubenswrapper[4903]: E1126 22:22:57.028154 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:22:57 crc kubenswrapper[4903]: E1126 22:22:57.028329 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:22:57 crc kubenswrapper[4903]: E1126 22:22:57.028404 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.059856 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.059930 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.059952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.059984 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.060005 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.163235 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.163296 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.163313 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.163337 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.163354 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.265479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.265588 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.265614 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.265641 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.265662 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.368623 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.368679 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.368735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.368760 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.368779 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.470967 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.471013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.471024 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.471044 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.471055 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.573520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.573583 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.573601 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.573627 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.573643 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.675892 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.675952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.675968 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.675992 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.676009 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.779027 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.779079 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.779096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.779120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.779138 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.882061 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.882127 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.882150 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.882181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.882202 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.985032 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.985099 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.985119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.985145 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:57 crc kubenswrapper[4903]: I1126 22:22:57.985161 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:57Z","lastTransitionTime":"2025-11-26T22:22:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.027554 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:22:58 crc kubenswrapper[4903]: E1126 22:22:58.027837 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.029370 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" Nov 26 22:22:58 crc kubenswrapper[4903]: E1126 22:22:58.029757 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-bbznt_openshift-ovn-kubernetes(ef55a921-a95f-4b2b-84b7-98c1082a1bb6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.088385 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.088430 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.088441 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.088458 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.088471 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.197073 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.197143 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.197156 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.197185 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.197199 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.300010 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.300062 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.300079 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.300105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.300123 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.403255 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.403314 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.403331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.403357 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.403376 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.506719 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.506772 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.506789 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.506813 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.506830 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.609877 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.609976 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.609995 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.610060 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.610079 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.713613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.713743 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.713772 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.713811 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.713835 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.817561 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.817624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.817645 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.817671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.817745 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.920887 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.920940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.920956 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.920979 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:58 crc kubenswrapper[4903]: I1126 22:22:58.920995 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:58Z","lastTransitionTime":"2025-11-26T22:22:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.023053 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.023098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.023110 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.023128 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.023141 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.027534 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.027578 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.027578 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:22:59 crc kubenswrapper[4903]: E1126 22:22:59.027653 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:22:59 crc kubenswrapper[4903]: E1126 22:22:59.027951 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:22:59 crc kubenswrapper[4903]: E1126 22:22:59.028539 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.125814 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.125864 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.125880 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.125897 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.125910 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.228965 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.229026 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.229048 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.229077 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.229102 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.332592 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.332634 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.332647 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.332665 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.332677 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.435365 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.435416 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.435427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.435444 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.435455 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.537551 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.537602 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.537613 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.537630 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.537642 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.640412 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.640459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.640473 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.640489 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.640502 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.743735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.743786 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.743800 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.743819 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.743832 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.846664 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.846722 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.846737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.846753 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.846764 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.949537 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.949619 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.949633 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.949651 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:22:59 crc kubenswrapper[4903]: I1126 22:22:59.949665 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:22:59Z","lastTransitionTime":"2025-11-26T22:22:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.028031 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:23:00 crc kubenswrapper[4903]: E1126 22:23:00.028199 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.052660 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.052718 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.052733 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.052751 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.052765 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.155857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.155911 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.155928 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.155950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.155967 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.258735 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.258793 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.258809 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.258830 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.258847 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.362393 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.362453 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.362470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.362494 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.362511 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.465013 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.465084 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.465105 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.465132 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.465154 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.568470 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.568540 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.568557 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.568581 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.568600 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.671252 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.671323 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.671340 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.671364 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.671382 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.773828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.773882 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.773895 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.773910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.773921 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.881254 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.881321 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.881338 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.881363 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.881380 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.984237 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.984293 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.984306 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.984324 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:00 crc kubenswrapper[4903]: I1126 22:23:00.984337 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:00Z","lastTransitionTime":"2025-11-26T22:23:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.050488 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.050526 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.050584 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.050688 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.050810 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.050911 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.086910 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.086953 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.086969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.086990 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.087006 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.189479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.189524 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.189541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.189563 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.189578 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.251082 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.251323 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.251528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.251683 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.251864 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.273245 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:01Z is after 2025-08-24T17:21:41Z"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.279061 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.279284 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.279430 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.279748 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.279894 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.305969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.306038 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.306061 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.306115 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.306135 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.331838 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.331898 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.331915 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.331940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.331956 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.357603 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.357835 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.357977 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.358126 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.358266 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.377767 4903 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T22:23:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d47a3d9-b3b7-4680-8967-b1b51d436e50\\\",\\\"systemUUID\\\":\\\"4bd090e0-3377-4de9-8ab0-9d1eda387d4f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:01Z is after 2025-08-24T17:21:41Z" Nov 26 22:23:01 crc kubenswrapper[4903]: E1126 22:23:01.378272 4903 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.380123 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.380163 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.380180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.380199 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.380213 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.483156 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.483371 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.483510 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.483660 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.483836 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.586363 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.586411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.586427 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.586448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.586467 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.689113 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.689877 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.690029 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.690179 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.690326 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.793161 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.793524 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.793679 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.793987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.794132 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.897292 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.897329 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.897341 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.897359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.897372 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.999639 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.999757 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.999785 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.999816 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:01 crc kubenswrapper[4903]: I1126 22:23:01.999838 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:01Z","lastTransitionTime":"2025-11-26T22:23:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.027981 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:02 crc kubenswrapper[4903]: E1126 22:23:02.028476 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.045900 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43088254-0fdc-4d7a-87cb-92306a8dc3ab\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:22:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8c2e4e68e3ec73eb79683a1de97bb7b5ef13c48db46c21b18f4bcee08be716a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3023824bcd9e429032a9d99b81a4b821b5c9382cefc352f53ad75a87a1088d66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef7612eb84c353536c5410b0149cdd9a96cd3a47311a0a49f0865d9f5a1d5dd2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5140752950b03c2f8e78fb277116a8ef2df734ac613cf7b1af4c78876547c8ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T22:21:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T22:21:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.064278 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.102769 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.102816 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.102832 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.102856 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.103718 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.104980 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:32Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.117958 4903 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-knwk2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77f835e9-1a25-43f2-9c32-5d5311495723\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T22:21:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c45d4d7fbf0dccb72a3d46352b89a8bd329948b3c6d0f60186829eba57243a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T22:21:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-72n8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T22:21:37Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-knwk2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T22:23:02Z is after 2025-08-24T17:21:41Z" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.153159 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-ft62j" podStartSLOduration=89.15311746 podStartE2EDuration="1m29.15311746s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.152930385 +0000 UTC m=+110.843165345" watchObservedRunningTime="2025-11-26 22:23:02.15311746 +0000 UTC m=+110.843352370" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207617 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.20758513 podStartE2EDuration="1m29.20758513s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-26 22:23:02.17942792 +0000 UTC m=+110.869662830" watchObservedRunningTime="2025-11-26 22:23:02.20758513 +0000 UTC m=+110.897820080" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207902 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207949 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207998 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.208019 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.207941 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.207928109 podStartE2EDuration="1m29.207928109s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.207524148 +0000 UTC m=+110.897759098" watchObservedRunningTime="2025-11-26 22:23:02.207928109 +0000 UTC m=+110.898163099" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.275402 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=10.275378314 podStartE2EDuration="10.275378314s" podCreationTimestamp="2025-11-26 22:22:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.274811339 +0000 UTC m=+110.965046289" watchObservedRunningTime="2025-11-26 22:23:02.275378314 +0000 UTC m=+110.965613264" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.275897 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=21.275889838 podStartE2EDuration="21.275889838s" podCreationTimestamp="2025-11-26 22:22:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.237313863 +0000 UTC m=+110.927548813" watchObservedRunningTime="2025-11-26 22:23:02.275889838 +0000 UTC m=+110.966124788" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.310545 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.310616 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.310637 4903 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.310667 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.310688 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.344491 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-2z7vf" podStartSLOduration=90.344466264 podStartE2EDuration="1m30.344466264s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.344094843 +0000 UTC m=+111.034329793" watchObservedRunningTime="2025-11-26 22:23:02.344466264 +0000 UTC m=+111.034701204" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.407354 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-dlvd4" podStartSLOduration=90.407331792 podStartE2EDuration="1m30.407331792s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.388759874 +0000 UTC m=+111.078994814" watchObservedRunningTime="2025-11-26 22:23:02.407331792 +0000 UTC m=+111.097566742" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.413661 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.413904 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.414119 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.414332 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.414528 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.435621 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podStartSLOduration=90.435595646 podStartE2EDuration="1m30.435595646s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.435531214 +0000 UTC m=+111.125766144" watchObservedRunningTime="2025-11-26 22:23:02.435595646 +0000 UTC m=+111.125830586" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.457673 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-bxnsh" podStartSLOduration=90.457653018 podStartE2EDuration="1m30.457653018s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:02.457301049 +0000 UTC m=+111.147535999" watchObservedRunningTime="2025-11-26 22:23:02.457653018 +0000 UTC m=+111.147887958" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.517949 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.517990 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.518000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.518015 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.518023 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.622118 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.622632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.622660 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.622737 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.622756 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.933120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.933227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.933248 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.933280 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:02 crc kubenswrapper[4903]: I1126 22:23:02.933303 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:02Z","lastTransitionTime":"2025-11-26T22:23:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
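
The loop above keeps the node NotReady for one reason: the kubelet finds no CNI network configuration on disk. A minimal Python sketch of that readiness condition, assuming the directory named in the kubelet's own error message and the standard libcni file extensions (illustrative diagnostic, not kubelet source code):

#!/usr/bin/env python3
# Illustrative sketch (not part of the captured log): approximates the CNI
# readiness condition behind the repeated KubeletNotReady records above.
# The directory comes from the kubelet's error message; the extensions
# follow the standard libcni convention and are an assumption here.
import os
import sys

CNI_CONF_DIR = "/etc/kubernetes/cni/net.d"
CNI_EXTENSIONS = (".conf", ".conflist", ".json")

def network_ready(conf_dir: str = CNI_CONF_DIR) -> bool:
    """True once at least one CNI network config file exists."""
    try:
        entries = os.listdir(conf_dir)
    except FileNotFoundError:
        return False
    return any(name.endswith(CNI_EXTENSIONS) for name in entries)

if __name__ == "__main__":
    if network_ready():
        print("NetworkReady=true: CNI configuration present")
        sys.exit(0)
    print(f"NetworkReady=false: no CNI configuration file in {CNI_CONF_DIR}. "
          "Has your network provider started?")
    sys.exit(1)

Until the network provider (OVN-Kubernetes here, per the ovnkube-control-plane records above) writes its config into that directory, every sync attempt ends in the same NodeNotReady heartbeat.
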
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.028278 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.028323 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.028283 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:23:03 crc kubenswrapper[4903]: E1126 22:23:03.028546 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:23:03 crc kubenswrapper[4903]: E1126 22:23:03.028676 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:23:03 crc kubenswrapper[4903]: E1126 22:23:03.028903 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.036839 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.036895 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.036914 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.036938 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.036955 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:03Z","lastTransitionTime":"2025-11-26T22:23:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.140283 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.140343 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.140430 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.140459 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.140476 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:03Z","lastTransitionTime":"2025-11-26T22:23:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.966453 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.966519 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.966541 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.966573 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:03 crc kubenswrapper[4903]: I1126 22:23:03.966593 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:03Z","lastTransitionTime":"2025-11-26T22:23:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
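
Interleaved with the readiness loop, every node and pod status patch in this window fails the same way: the network-node-identity webhook serving https://127.0.0.1:9743 presents a certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-26. A stdlib-only Python sketch that reproduces the client-side verification failure (hypothetical diagnostic; the kubelet's actual client is Go):

#!/usr/bin/env python3
# Illustrative sketch (not part of the captured log): mirrors the TLS
# verification the kubelet performs when posting to the
# node.network-node-identity.openshift.io webhook, which fails above with
# "x509: certificate has expired or is not yet valid".
import socket
import ssl

HOST, PORT = "127.0.0.1", 9743  # webhook endpoint taken from the log records

def check_webhook_tls(host: str = HOST, port: int = PORT) -> bool:
    """Attempt a verified handshake and report any verification failure."""
    ctx = ssl.create_default_context()  # CERT_REQUIRED, as a real client uses
    try:
        with socket.create_connection((host, port), timeout=10) as sock:
            with ctx.wrap_socket(sock, server_hostname=host):
                print("handshake ok: serving certificate verified")
                return True
    except ssl.SSLCertVerificationError as err:
        # Depending on the chain, the reason may be the expiry seen in the
        # log ("certificate has expired") or an unknown issuer.
        print(f"tls: failed to verify certificate: {err.verify_message}")
        return False
    except OSError as err:
        print(f"connection failed: {err}")
        return False

if __name__ == "__main__":
    check_webhook_tls()

Here verify_message carries OpenSSL's reason string, which for an expired certificate matches the "certificate has expired" text embedded in the kubelet records above.
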
Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.027947 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:04 crc kubenswrapper[4903]: E1126 22:23:04.028162 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.069055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.069109 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.069125 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.069145 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.069157 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.172011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.172066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.172078 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.172098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.172111 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.275098 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.275144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.275162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.275184 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.275202 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.378522 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.378571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.378588 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.378610 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.378626 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.481112 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.481183 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.481200 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.481230 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.481251 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.584452 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.584509 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.584526 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.584550 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.584567 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.687878 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.688003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.688023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.688045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.688093 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.790987 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.791057 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.791080 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.791104 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.791120 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.893643 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.893757 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.893782 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.893811 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.893832 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.997454 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.997520 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.997571 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.997589 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:04 crc kubenswrapper[4903]: I1126 22:23:04.997599 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:04Z","lastTransitionTime":"2025-11-26T22:23:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.028207 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.028340 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.028268 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:05 crc kubenswrapper[4903]: E1126 22:23:05.028483 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:05 crc kubenswrapper[4903]: E1126 22:23:05.028589 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:05 crc kubenswrapper[4903]: E1126 22:23:05.028731 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.101134 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.101201 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.101218 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.101245 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.101263 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.203979 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.204138 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.204166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.204200 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.204223 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.307319 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.307379 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.307391 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.307409 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.307423 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.410077 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.410126 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.410136 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.410153 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.410163 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.513666 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.513775 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.513794 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.513822 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.513839 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.617096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.617176 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.617195 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.617227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.617248 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.719802 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.719918 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.719942 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.719974 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.719996 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.823062 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.823124 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.823141 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.823167 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.823183 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.926003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.926055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.926068 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.926091 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:05 crc kubenswrapper[4903]: I1126 22:23:05.926106 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:05Z","lastTransitionTime":"2025-11-26T22:23:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.027568 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:06 crc kubenswrapper[4903]: E1126 22:23:06.027741 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.029312 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.029348 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.029361 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.029379 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.029392 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.132243 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.132306 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.132327 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.132352 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.132372 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.235456 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.235525 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.235543 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.235568 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.235588 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.338861 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.338955 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.338975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.338998 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.339016 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.441880 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.441952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.441975 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.442003 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.442025 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.544482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.544546 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.544568 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.544597 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.544620 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.647626 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.647671 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.647686 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.647752 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.647771 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.750962 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.751019 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.751036 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.751059 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.751077 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.854208 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.854250 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.854263 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.854279 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.854293 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.957617 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.957674 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.957724 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.957755 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:06 crc kubenswrapper[4903]: I1126 22:23:06.957778 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:06Z","lastTransitionTime":"2025-11-26T22:23:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.027972 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.027998 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.028087 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:07 crc kubenswrapper[4903]: E1126 22:23:07.028388 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:07 crc kubenswrapper[4903]: E1126 22:23:07.028638 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:07 crc kubenswrapper[4903]: E1126 22:23:07.028792 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.060795 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.060857 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.060881 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.060908 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.060929 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.162995 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.163052 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.163071 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.163096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.163113 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.266177 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.266238 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.266259 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.266286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.266306 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.368502 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.368542 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.368554 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.368569 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.368580 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.471187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.471244 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.471264 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.471285 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.471299 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.573489 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.573528 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.573538 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.573554 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.573564 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.675959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.676009 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.676025 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.676046 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.676068 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.779479 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.779529 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.779545 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.779569 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.779586 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.881940 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.881977 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.881989 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.882007 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.882018 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.985227 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.985270 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.985286 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.985301 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:07 crc kubenswrapper[4903]: I1126 22:23:07.985313 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:07Z","lastTransitionTime":"2025-11-26T22:23:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.027926 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:08 crc kubenswrapper[4903]: E1126 22:23:08.028061 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.088405 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.088447 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.088458 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.088473 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.088485 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.190969 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.191011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.191023 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.191040 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.191051 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.294256 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.294314 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.294331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.294354 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.294370 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.396846 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.396919 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.396933 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.396950 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.396962 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.500416 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.500495 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.500519 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.500549 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.500571 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.604411 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.604488 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.604511 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.604540 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.604562 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.702546 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/1.log"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.703840 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/0.log"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.703935 4903 generic.go:334] "Generic (PLEG): container finished" podID="229974d7-7b78-434b-a346-8b9004e69bf2" containerID="969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a" exitCode=1
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.703983 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerDied","Data":"969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a"}
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.704036 4903 scope.go:117] "RemoveContainer" containerID="8aae1d77ec4f162de4e3e0e8ea3e77dd5721bf360a911fe8937932efab26c4df"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.705081 4903 scope.go:117] "RemoveContainer" containerID="969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a"
Nov 26 22:23:08 crc kubenswrapper[4903]: E1126 22:23:08.705433 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-bxnsh_openshift-multus(229974d7-7b78-434b-a346-8b9004e69bf2)\"" pod="openshift-multus/multus-bxnsh" podUID="229974d7-7b78-434b-a346-8b9004e69bf2"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.707930 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.708034 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.708112 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.708154 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.708232 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.808502 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-knwk2" podStartSLOduration=96.808477395 podStartE2EDuration="1m36.808477395s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:08.788229942 +0000 UTC m=+117.478464892" watchObservedRunningTime="2025-11-26 22:23:08.808477395 +0000 UTC m=+117.498712345"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.808639 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=65.80863305 podStartE2EDuration="1m5.80863305s" podCreationTimestamp="2025-11-26 22:22:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:08.80717021 +0000 UTC m=+117.497405120" watchObservedRunningTime="2025-11-26 22:23:08.80863305 +0000 UTC m=+117.498867990"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.811307 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.811365 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.811378 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.811402 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.811415 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.914918 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.914976 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.914991 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.915011 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:08 crc kubenswrapper[4903]: I1126 22:23:08.915025 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:08Z","lastTransitionTime":"2025-11-26T22:23:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.018178 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.018249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.018272 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.018302 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.018319 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.027837 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.027861 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.027899 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:09 crc kubenswrapper[4903]: E1126 22:23:09.027990 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:09 crc kubenswrapper[4903]: E1126 22:23:09.028228 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:09 crc kubenswrapper[4903]: E1126 22:23:09.028310 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.121151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.121206 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.121222 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.121249 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.121267 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.222828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.222853 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.222861 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.222873 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.222881 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.325944 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.326043 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.326065 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.326151 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.326178 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.428732 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.428798 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.428808 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.428825 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.428835 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.531101 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.531137 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.531150 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.531166 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.531177 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.633952 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.633990 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.634001 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.634018 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.634030 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.708646 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/1.log" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.737006 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.737066 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.737083 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.737106 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.737123 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.839482 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.839537 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.839557 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.839584 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.839601 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.942937 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.943000 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.943019 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.943045 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:09 crc kubenswrapper[4903]: I1126 22:23:09.943063 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:09Z","lastTransitionTime":"2025-11-26T22:23:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.027797 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:10 crc kubenswrapper[4903]: E1126 22:23:10.027990 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.046079 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.046140 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.046157 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.046181 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.046201 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.148836 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.148885 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.148896 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.148916 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.148926 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.252054 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.252120 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.252141 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.252169 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.252189 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.355559 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.355661 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.355680 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.355734 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.355753 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.458595 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.458670 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.458688 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.458739 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.458757 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.561474 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.561553 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.561577 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.561602 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.561620 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.665076 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.665144 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.665162 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.665187 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.665204 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.768768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.768828 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.768849 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.768878 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.768899 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.873632 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.873768 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.873788 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.873815 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.873835 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.980096 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.980159 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.980180 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.980205 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:10 crc kubenswrapper[4903]: I1126 22:23:10.980230 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:10Z","lastTransitionTime":"2025-11-26T22:23:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.028127 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.028192 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.028212 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:11 crc kubenswrapper[4903]: E1126 22:23:11.028351 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:11 crc kubenswrapper[4903]: E1126 22:23:11.028503 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:11 crc kubenswrapper[4903]: E1126 22:23:11.028675 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.083904 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.083954 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.083971 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.083993 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.084009 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.187291 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.187331 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.187342 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.187358 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.187369 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.290959 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.291028 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.291050 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.291074 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.291091 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.394399 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.394448 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.394466 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.394489 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.394507 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.497829 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.497888 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.497905 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.497932 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.497950 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.601359 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.601518 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.601549 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.601624 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.601652 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.677947 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.678014 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.678030 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.678055 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.678072 4903 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T22:23:11Z","lastTransitionTime":"2025-11-26T22:23:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.764936 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"]
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.765459 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.768861 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.771103 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.771146 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.771383 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.824866 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bae7eeb2-ba57-4129-beab-3c2484ed38c6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.824908 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.824936 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bae7eeb2-ba57-4129-beab-3c2484ed38c6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.824976 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae7eeb2-ba57-4129-beab-3c2484ed38c6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.824994 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926461 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bae7eeb2-ba57-4129-beab-3c2484ed38c6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926546 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926585 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bae7eeb2-ba57-4129-beab-3c2484ed38c6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926682 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae7eeb2-ba57-4129-beab-3c2484ed38c6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926713 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926818 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.926940 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/bae7eeb2-ba57-4129-beab-3c2484ed38c6-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.928015 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bae7eeb2-ba57-4129-beab-3c2484ed38c6-service-ca\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.940875 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bae7eeb2-ba57-4129-beab-3c2484ed38c6-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.953386 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 26 22:23:11 crc kubenswrapper[4903]: I1126 22:23:11.965671 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bae7eeb2-ba57-4129-beab-3c2484ed38c6-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-csdgl\" (UID: \"bae7eeb2-ba57-4129-beab-3c2484ed38c6\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:12 crc kubenswrapper[4903]: I1126 22:23:12.027965 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:23:12 crc kubenswrapper[4903]: E1126 22:23:12.029957 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 22:23:12 crc kubenswrapper[4903]: E1126 22:23:12.036670 4903 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Nov 26 22:23:12 crc kubenswrapper[4903]: I1126 22:23:12.097638 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 26 22:23:12 crc kubenswrapper[4903]: I1126 22:23:12.105827 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl"
Nov 26 22:23:12 crc kubenswrapper[4903]: E1126 22:23:12.153349 4903 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 26 22:23:12 crc kubenswrapper[4903]: I1126 22:23:12.730306 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl" event={"ID":"bae7eeb2-ba57-4129-beab-3c2484ed38c6","Type":"ContainerStarted","Data":"8fc1b335fe6d68dd3db1741552b3ca0e7d868e99941e1c00cda5a8de9e55341d"}
Nov 26 22:23:12 crc kubenswrapper[4903]: I1126 22:23:12.730659 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl" event={"ID":"bae7eeb2-ba57-4129-beab-3c2484ed38c6","Type":"ContainerStarted","Data":"d267e7d8913ac338a4ef206b8bbc5c4057ebaafc6958af78d3ebfa9a0530f439"}
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.028040 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw"
Nov 26 22:23:13 crc kubenswrapper[4903]: E1126 22:23:13.028227 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.028480 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.029180 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 22:23:13 crc kubenswrapper[4903]: E1126 22:23:13.029340 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 22:23:13 crc kubenswrapper[4903]: E1126 22:23:13.029545 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.029897 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.736953 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/3.log"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.740250 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerStarted","Data":"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5"}
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.740656 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.780379 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podStartSLOduration=100.780353999 podStartE2EDuration="1m40.780353999s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:13.779982859 +0000 UTC m=+122.470217799" watchObservedRunningTime="2025-11-26 22:23:13.780353999 +0000 UTC m=+122.470588929"
Nov 26 22:23:13 crc kubenswrapper[4903]: I1126 22:23:13.780885 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-csdgl" podStartSLOduration=101.780876403 podStartE2EDuration="1m41.780876403s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:12.752709824 +0000 UTC m=+121.442944744" watchObservedRunningTime="2025-11-26 22:23:13.780876403 +0000 UTC m=+122.471111333"
Nov 26 22:23:14 crc kubenswrapper[4903]: I1126 22:23:14.027971 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:14 crc kubenswrapper[4903]: E1126 22:23:14.028244 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:14 crc kubenswrapper[4903]: I1126 22:23:14.296579 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-q8dvw"] Nov 26 22:23:14 crc kubenswrapper[4903]: I1126 22:23:14.296740 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:14 crc kubenswrapper[4903]: E1126 22:23:14.296876 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:15 crc kubenswrapper[4903]: I1126 22:23:15.028017 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:15 crc kubenswrapper[4903]: I1126 22:23:15.028054 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:15 crc kubenswrapper[4903]: E1126 22:23:15.028399 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:15 crc kubenswrapper[4903]: E1126 22:23:15.028568 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:16 crc kubenswrapper[4903]: I1126 22:23:16.028264 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:16 crc kubenswrapper[4903]: E1126 22:23:16.028464 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:16 crc kubenswrapper[4903]: I1126 22:23:16.028559 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:16 crc kubenswrapper[4903]: E1126 22:23:16.028808 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:17 crc kubenswrapper[4903]: I1126 22:23:17.028045 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:17 crc kubenswrapper[4903]: I1126 22:23:17.028111 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:17 crc kubenswrapper[4903]: E1126 22:23:17.028434 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:17 crc kubenswrapper[4903]: E1126 22:23:17.028580 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:17 crc kubenswrapper[4903]: E1126 22:23:17.154994 4903 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:23:18 crc kubenswrapper[4903]: I1126 22:23:18.028194 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:18 crc kubenswrapper[4903]: I1126 22:23:18.028224 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:18 crc kubenswrapper[4903]: E1126 22:23:18.028392 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:18 crc kubenswrapper[4903]: E1126 22:23:18.028467 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:19 crc kubenswrapper[4903]: I1126 22:23:19.028449 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:19 crc kubenswrapper[4903]: I1126 22:23:19.028463 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:19 crc kubenswrapper[4903]: E1126 22:23:19.028654 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:19 crc kubenswrapper[4903]: E1126 22:23:19.028806 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:20 crc kubenswrapper[4903]: I1126 22:23:20.028313 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:20 crc kubenswrapper[4903]: E1126 22:23:20.028519 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:20 crc kubenswrapper[4903]: I1126 22:23:20.028609 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:20 crc kubenswrapper[4903]: E1126 22:23:20.028826 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:21 crc kubenswrapper[4903]: I1126 22:23:21.027806 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:21 crc kubenswrapper[4903]: E1126 22:23:21.027970 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:21 crc kubenswrapper[4903]: I1126 22:23:21.027806 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:21 crc kubenswrapper[4903]: E1126 22:23:21.028419 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:22 crc kubenswrapper[4903]: I1126 22:23:22.028518 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:22 crc kubenswrapper[4903]: I1126 22:23:22.028526 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:22 crc kubenswrapper[4903]: E1126 22:23:22.031191 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:22 crc kubenswrapper[4903]: E1126 22:23:22.031373 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:22 crc kubenswrapper[4903]: E1126 22:23:22.155745 4903 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:23:23 crc kubenswrapper[4903]: I1126 22:23:23.027819 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:23 crc kubenswrapper[4903]: I1126 22:23:23.027833 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:23 crc kubenswrapper[4903]: I1126 22:23:23.028327 4903 scope.go:117] "RemoveContainer" containerID="969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a" Nov 26 22:23:23 crc kubenswrapper[4903]: E1126 22:23:23.028339 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:23 crc kubenswrapper[4903]: E1126 22:23:23.028676 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:23 crc kubenswrapper[4903]: I1126 22:23:23.780291 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/1.log" Nov 26 22:23:23 crc kubenswrapper[4903]: I1126 22:23:23.780651 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerStarted","Data":"a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0"} Nov 26 22:23:24 crc kubenswrapper[4903]: I1126 22:23:24.028393 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:24 crc kubenswrapper[4903]: E1126 22:23:24.028591 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:24 crc kubenswrapper[4903]: I1126 22:23:24.028873 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:24 crc kubenswrapper[4903]: E1126 22:23:24.029105 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:25 crc kubenswrapper[4903]: I1126 22:23:25.027753 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:25 crc kubenswrapper[4903]: I1126 22:23:25.027810 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:25 crc kubenswrapper[4903]: E1126 22:23:25.027933 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:25 crc kubenswrapper[4903]: E1126 22:23:25.028066 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:26 crc kubenswrapper[4903]: I1126 22:23:26.028000 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:26 crc kubenswrapper[4903]: E1126 22:23:26.028236 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-q8dvw" podUID="aef28737-00fd-4738-ae1f-e02a5b974905" Nov 26 22:23:26 crc kubenswrapper[4903]: I1126 22:23:26.028014 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:26 crc kubenswrapper[4903]: E1126 22:23:26.028594 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 22:23:27 crc kubenswrapper[4903]: I1126 22:23:27.027888 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:27 crc kubenswrapper[4903]: I1126 22:23:27.027912 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:27 crc kubenswrapper[4903]: E1126 22:23:27.028612 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 22:23:27 crc kubenswrapper[4903]: E1126 22:23:27.028451 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.028436 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.028457 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.031428 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.032008 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.032122 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 22:23:28 crc kubenswrapper[4903]: I1126 22:23:28.033110 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 22:23:29 crc kubenswrapper[4903]: I1126 22:23:29.028442 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:29 crc kubenswrapper[4903]: I1126 22:23:29.028502 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:29 crc kubenswrapper[4903]: I1126 22:23:29.031498 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 22:23:29 crc kubenswrapper[4903]: I1126 22:23:29.031611 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 22:23:29 crc kubenswrapper[4903]: I1126 22:23:29.140168 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.215853 4903 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.270288 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4tr72"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.271002 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.285040 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.285670 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.287361 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.287961 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.288286 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.288535 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.289187 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.289448 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.290317 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.291287 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.304226 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.304650 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.308554 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.308875 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.309115 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.309310 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.309510 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.310109 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.311775 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.312011 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.313977 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.316459 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-tjpcm"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.317284 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.321219 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.321814 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.322902 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.325305 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.333219 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.333827 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.351400 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.354705 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.354937 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.355298 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.358622 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.358661 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.360127 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.360239 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.360306 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.360911 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.361188 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.361357 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.361457 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.362305 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.365319 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 
22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.365427 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.365814 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.365882 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.366431 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.367228 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x242l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.369204 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.370085 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-btw2l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.370274 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.370432 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.370474 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.388421 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.388997 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7qxf7"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389107 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389161 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-service-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389185 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-config\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389210 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/105dda6e-573f-49a6-a9a3-d29dd954ee09-serving-cert\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389232 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-config\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389260 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-images\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389282 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28k7x\" (UniqueName: \"kubernetes.io/projected/27420cfc-cc8f-4482-9206-706ab7bf9430-kube-api-access-28k7x\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc 
kubenswrapper[4903]: I1126 22:23:32.389318 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/27420cfc-cc8f-4482-9206-706ab7bf9430-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389354 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt7js\" (UniqueName: \"kubernetes.io/projected/105dda6e-573f-49a6-a9a3-d29dd954ee09-kube-api-access-vt7js\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389590 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-446xv"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.389952 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.390040 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.390067 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.390523 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.390882 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.402712 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.403283 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.406068 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-g8r8c"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.406449 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.406904 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.407262 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413278 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413329 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413581 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413634 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413783 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.413888 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.418132 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.418608 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.418913 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.419220 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.419827 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.420379 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.420644 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.438799 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.438821 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.438821 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.442239 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.442863 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.444957 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-btk26"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.445305 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgg79"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.445567 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.445979 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.446146 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.446275 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.446898 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-lxr2h"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.447303 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.448370 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.470916 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.471128 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.471646 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5572p"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.472842 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.473313 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.473986 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.474574 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.474667 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475022 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475239 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475556 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475818 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475870 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476058 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476510 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476543 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476687 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476810 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476895 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.476976 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" 
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477005 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477095 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477253 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477274 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477333 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477414 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477581 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477717 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477734 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.477823 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478277 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478436 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478564 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478684 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478834 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.478961 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479219 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479336 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479385 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479492 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479587 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479668 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.475250 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479888 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479935 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.479979 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"]
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.480038 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.480257 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.480314 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.482157 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.483327 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"]
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.487403 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8"]
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.483447 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.482164 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.490510 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.482245 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.482369 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.495086 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"]
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.495667 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.498609 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.498822 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.499149 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.499549 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.500030 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.500751 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505263 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505285 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-serving-cert\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505317 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505332 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505348 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505362 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgd7n\" (UniqueName: \"kubernetes.io/projected/4115a44a-84ae-4629-970e-16c25d4f59e1-kube-api-access-wgd7n\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505377 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr7rk\" (UniqueName: \"kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505396 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505414 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-node-pullsecrets\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cea77f03-3be2-41bd-be01-cf09fb878b3d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505447 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505470 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smc6m\" (UniqueName: \"kubernetes.io/projected/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-kube-api-access-smc6m\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505484 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-config\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505502 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505516 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-image-import-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm"
Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505532 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-serving-cert\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505551 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/27420cfc-cc8f-4482-9206-706ab7bf9430-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505578 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54pb4\" (UniqueName: \"kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505597 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlvsg\" (UniqueName: \"kubernetes.io/projected/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-kube-api-access-zlvsg\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505612 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505630 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505644 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-client\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505659 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit-dir\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505677 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505703 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505727 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505744 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-982sv\" (UniqueName: \"kubernetes.io/projected/cea77f03-3be2-41bd-be01-cf09fb878b3d-kube-api-access-982sv\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505758 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-trusted-ca\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505773 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505792 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt7js\" (UniqueName: \"kubernetes.io/projected/105dda6e-573f-49a6-a9a3-d29dd954ee09-kube-api-access-vt7js\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505807 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505825 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-qb7vw\" (UniqueName: \"kubernetes.io/projected/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-kube-api-access-qb7vw\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505843 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505858 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsgw7\" (UniqueName: \"kubernetes.io/projected/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-kube-api-access-bsgw7\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505875 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.507725 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.511665 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.513188 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-service-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.519853 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.519942 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/27420cfc-cc8f-4482-9206-706ab7bf9430-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.520393 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.520761 4903 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.520969 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.524043 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525078 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525251 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525104 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525356 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.505892 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-service-ca-bundle\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525480 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-config\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525515 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525558 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525584 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525612 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/105dda6e-573f-49a6-a9a3-d29dd954ee09-serving-cert\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525633 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-auth-proxy-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525661 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-config\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525705 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525732 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525752 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4115a44a-84ae-4629-970e-16c25d4f59e1-machine-approver-tls\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525768 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-serving-cert\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525785 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-images\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525800 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" 
(UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525826 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28k7x\" (UniqueName: \"kubernetes.io/projected/27420cfc-cc8f-4482-9206-706ab7bf9430-kube-api-access-28k7x\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525855 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525876 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-trusted-ca-bundle\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525899 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525920 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzkhj\" (UniqueName: \"kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525964 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-serving-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.525986 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526018 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526050 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526069 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-encryption-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526316 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526321 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-config\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.526820 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/105dda6e-573f-49a6-a9a3-d29dd954ee09-config\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.527336 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/27420cfc-cc8f-4482-9206-706ab7bf9430-images\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.536010 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.536149 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.536780 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.536970 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.537162 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.537162 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.537404 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.538905 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/105dda6e-573f-49a6-a9a3-d29dd954ee09-serving-cert\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.539326 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.539444 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.539851 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.540017 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.540661 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.540725 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4tr72"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.540769 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.540950 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.541313 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.541882 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.542843 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-tjpcm"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.543747 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-btw2l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.546354 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.547252 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.548249 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x242l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.550861 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.555766 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-446xv"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.556639 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-dbzzj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.558203 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.559006 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.559442 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.561586 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.562814 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-g8r8c"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.563833 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-btk26"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.564878 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.565884 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.566992 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.568206 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.571662 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.571961 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.573030 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.574233 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rxr2l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.574946 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.575322 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dbzzj"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.576356 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7qxf7"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.577334 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7gs98"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.578418 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.580721 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.581812 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.582880 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.583866 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.584920 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.586347 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.587598 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5572p"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.588894 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgg79"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.590182 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.591567 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.592798 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.592918 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.593920 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.595009 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.596129 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7gs98"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.597176 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.598182 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.599483 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-ingress-canary/ingress-canary-bs46d"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.600028 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.600438 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-bs46d"] Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.613183 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626586 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-serving-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626611 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626633 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqmhm\" (UniqueName: \"kubernetes.io/projected/9ea9e718-d061-4176-b950-12497aeba908-kube-api-access-wqmhm\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626655 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626679 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626707 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-encryption-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626724 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-profile-collector-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626743 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2t7t\" (UniqueName: \"kubernetes.io/projected/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-kube-api-access-h2t7t\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626765 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1e45d40a-645b-4c6f-b001-cb3c8beef2da-tmpfs\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626828 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-serving-cert\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.626885 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627399 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627436 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wptwp\" (UniqueName: \"kubernetes.io/projected/a9b11630-4b04-471c-9774-400f8211c770-kube-api-access-wptwp\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627457 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627478 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627496 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgd7n\" (UniqueName: \"kubernetes.io/projected/4115a44a-84ae-4629-970e-16c25d4f59e1-kube-api-access-wgd7n\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627513 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr7rk\" (UniqueName: \"kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627565 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627581 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-node-pullsecrets\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627598 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cea77f03-3be2-41bd-be01-cf09fb878b3d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627617 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627636 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2xkg\" (UniqueName: \"kubernetes.io/projected/1e45d40a-645b-4c6f-b001-cb3c8beef2da-kube-api-access-v2xkg\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627665 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smc6m\" (UniqueName: \"kubernetes.io/projected/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-kube-api-access-smc6m\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627680 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-config\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627710 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-metrics-certs\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627727 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627744 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-image-import-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627763 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-serving-cert\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627778 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54pb4\" (UniqueName: \"kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627796 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlvsg\" (UniqueName: \"kubernetes.io/projected/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-kube-api-access-zlvsg\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627802 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-serving-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627813 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627865 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-config\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627898 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-default-certificate\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627949 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.627984 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-client\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628020 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit-dir\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628055 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-srv-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628092 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: 
\"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628123 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628174 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628187 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-982sv\" (UniqueName: \"kubernetes.io/projected/cea77f03-3be2-41bd-be01-cf09fb878b3d-kube-api-access-982sv\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628218 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-trusted-ca\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628248 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628294 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628327 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb7vw\" (UniqueName: \"kubernetes.io/projected/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-kube-api-access-qb7vw\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628358 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-apiservice-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628387 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-stats-auth\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628424 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsgw7\" (UniqueName: \"kubernetes.io/projected/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-kube-api-access-bsgw7\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628478 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628490 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628547 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628578 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628615 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-auth-proxy-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628646 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628673 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628719 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4115a44a-84ae-4629-970e-16c25d4f59e1-machine-approver-tls\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628762 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-serving-cert\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.628788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629206 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629397 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629398 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " 
pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629872 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-trusted-ca\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.629995 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630043 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-image-import-ca\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630134 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-serving-cert\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630288 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-node-pullsecrets\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630330 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630339 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-audit-dir\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630746 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca\") pod 
\"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630874 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.630960 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.631166 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.631228 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.631428 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-webhook-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.631839 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632038 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632093 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-config\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:32 crc kubenswrapper[4903]: 
I1126 22:23:32.632048 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4115a44a-84ae-4629-970e-16c25d4f59e1-auth-proxy-config\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632160 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-trusted-ca-bundle\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632434 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632447 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632546 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzkhj\" (UniqueName: \"kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632555 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.632725 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ea9e718-d061-4176-b950-12497aeba908-service-ca-bundle\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.633110 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.634572 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-trusted-ca-bundle\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " 
pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.635008 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-serving-cert\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.636090 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-serving-cert\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.636294 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.636546 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.636672 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.636897 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.637044 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/cea77f03-3be2-41bd-be01-cf09fb878b3d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.637311 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-encryption-config\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.637561 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.637624 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.638192 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-etcd-client\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.639224 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4115a44a-84ae-4629-970e-16c25d4f59e1-machine-approver-tls\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.639483 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.639554 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.645854 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.649345 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.654202 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.672827 4903 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.693159 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.713240 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733247 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733366 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqmhm\" (UniqueName: \"kubernetes.io/projected/9ea9e718-d061-4176-b950-12497aeba908-kube-api-access-wqmhm\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733415 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-profile-collector-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733436 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2t7t\" (UniqueName: \"kubernetes.io/projected/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-kube-api-access-h2t7t\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1e45d40a-645b-4c6f-b001-cb3c8beef2da-tmpfs\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733511 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wptwp\" (UniqueName: \"kubernetes.io/projected/a9b11630-4b04-471c-9774-400f8211c770-kube-api-access-wptwp\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733526 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733563 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2xkg\" (UniqueName: 
\"kubernetes.io/projected/1e45d40a-645b-4c6f-b001-cb3c8beef2da-kube-api-access-v2xkg\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733586 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-metrics-certs\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733617 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-default-certificate\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733633 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-srv-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733659 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-apiservice-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733674 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-stats-auth\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733733 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-webhook-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733747 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.733764 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ea9e718-d061-4176-b950-12497aeba908-service-ca-bundle\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 
26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.734135 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1e45d40a-645b-4c6f-b001-cb3c8beef2da-tmpfs\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.736578 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.736655 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.737900 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-profile-collector-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.738664 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a9b11630-4b04-471c-9774-400f8211c770-srv-cert\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.738780 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-apiservice-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.740726 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1e45d40a-645b-4c6f-b001-cb3c8beef2da-webhook-cert\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.752788 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.772768 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.793324 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.813421 4903 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.833598 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.852910 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.874230 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.893421 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.913843 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.933097 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.942109 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-stats-auth\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.953629 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.961833 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-metrics-certs\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.973617 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.993297 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 22:23:32 crc kubenswrapper[4903]: I1126 22:23:32.999392 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ea9e718-d061-4176-b950-12497aeba908-default-certificate\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.013775 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.015344 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ea9e718-d061-4176-b950-12497aeba908-service-ca-bundle\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.033588 4903 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.061657 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.072855 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.093000 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.113457 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.133887 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.154382 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.173388 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.193921 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.213628 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.233726 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.254139 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.273477 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.294167 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.314332 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.334078 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.400928 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt7js\" (UniqueName: \"kubernetes.io/projected/105dda6e-573f-49a6-a9a3-d29dd954ee09-kube-api-access-vt7js\") pod \"authentication-operator-69f744f599-4tr72\" (UID: \"105dda6e-573f-49a6-a9a3-d29dd954ee09\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.413937 4903 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.434618 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.453946 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.474649 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.494139 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.513518 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.530913 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.531847 4903 request.go:700] Waited for 1.010499026s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/secrets?fieldSelector=metadata.name%3Dservice-ca-operator-dockercfg-rg9jl&limit=500&resourceVersion=0 Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.534379 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.574242 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.582805 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28k7x\" (UniqueName: \"kubernetes.io/projected/27420cfc-cc8f-4482-9206-706ab7bf9430-kube-api-access-28k7x\") pod \"machine-api-operator-5694c8668f-x242l\" (UID: \"27420cfc-cc8f-4482-9206-706ab7bf9430\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.593999 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.613638 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.634834 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.654235 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.675918 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.677301 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.767763 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.768213 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.768668 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.769072 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.776141 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.794573 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.813777 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.832911 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.854456 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.874087 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.893209 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.905339 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-4tr72"] Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.913805 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: W1126 22:23:33.913860 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod105dda6e_573f_49a6_a9a3_d29dd954ee09.slice/crio-1127e2892cd1e3f798077c2e50c684caf87836e3ce59b0042d2028e50bbd8f3e WatchSource:0}: Error finding container 1127e2892cd1e3f798077c2e50c684caf87836e3ce59b0042d2028e50bbd8f3e: Status 404 returned error can't find the container with id 1127e2892cd1e3f798077c2e50c684caf87836e3ce59b0042d2028e50bbd8f3e Nov 26 22:23:33 crc 
kubenswrapper[4903]: I1126 22:23:33.919004 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x242l"] Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.941033 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.953306 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.973859 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 22:23:33 crc kubenswrapper[4903]: I1126 22:23:33.993742 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.012799 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.033362 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: W1126 22:23:34.040053 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27420cfc_cc8f_4482_9206_706ab7bf9430.slice/crio-97839c13b3d96941c5db1e1cf01e7f560c917f07d6ee67f70f496c9b979f140b WatchSource:0}: Error finding container 97839c13b3d96941c5db1e1cf01e7f560c917f07d6ee67f70f496c9b979f140b: Status 404 returned error can't find the container with id 97839c13b3d96941c5db1e1cf01e7f560c917f07d6ee67f70f496c9b979f140b Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.053560 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.074478 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.102208 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.112964 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.136099 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.153842 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.173941 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.193585 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.213717 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.233835 4903 reflector.go:368] Caches populated for 
*v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.254419 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.274424 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.294170 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.316397 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.334925 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.354308 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.403190 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smc6m\" (UniqueName: \"kubernetes.io/projected/6698acb2-1ed3-44af-ac70-8c11bdca5c6e-kube-api-access-smc6m\") pod \"openshift-config-operator-7777fb866f-btw2l\" (UID: \"6698acb2-1ed3-44af-ac70-8c11bdca5c6e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.420841 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54pb4\" (UniqueName: \"kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4\") pod \"oauth-openshift-558db77b4-b7nk4\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.442491 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlvsg\" (UniqueName: \"kubernetes.io/projected/675115ca-1ad8-4ab0-a8fc-dea767d6abbc-kube-api-access-zlvsg\") pod \"openshift-apiserver-operator-796bbdcf4f-jnj9m\" (UID: \"675115ca-1ad8-4ab0-a8fc-dea767d6abbc\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.467785 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr7rk\" (UniqueName: \"kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk\") pod \"controller-manager-879f6c89f-d8tw8\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.483134 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgd7n\" (UniqueName: \"kubernetes.io/projected/4115a44a-84ae-4629-970e-16c25d4f59e1-kube-api-access-wgd7n\") pod \"machine-approver-56656f9798-zg6dg\" (UID: \"4115a44a-84ae-4629-970e-16c25d4f59e1\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.503794 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.509259 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsgw7\" (UniqueName: \"kubernetes.io/projected/edcd7b52-c1d0-4a27-8550-d4b7885eaf5e-kube-api-access-bsgw7\") pod \"console-operator-58897d9998-446xv\" (UID: \"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e\") " pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.513414 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.516080 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb7vw\" (UniqueName: \"kubernetes.io/projected/849a13af-5a8d-43b2-b8d8-c07cd8bfe399-kube-api-access-qb7vw\") pod \"apiserver-76f77b778f-tjpcm\" (UID: \"849a13af-5a8d-43b2-b8d8-c07cd8bfe399\") " pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.536528 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-982sv\" (UniqueName: \"kubernetes.io/projected/cea77f03-3be2-41bd-be01-cf09fb878b3d-kube-api-access-982sv\") pod \"cluster-samples-operator-665b6dd947-x4kfj\" (UID: \"cea77f03-3be2-41bd-be01-cf09fb878b3d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.547551 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.552547 4903 request.go:700] Waited for 1.81872379s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/serviceaccounts/openshift-controller-manager-operator/token Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.552595 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzkhj\" (UniqueName: \"kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj\") pod \"route-controller-manager-6576b87f9c-7vvhp\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.579511 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2t7t\" (UniqueName: \"kubernetes.io/projected/c9ab96ae-576b-4a86-b4fb-2b059759fb1e-kube-api-access-h2t7t\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2s7q\" (UID: \"c9ab96ae-576b-4a86-b4fb-2b059759fb1e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.585649 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.590844 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqmhm\" (UniqueName: \"kubernetes.io/projected/9ea9e718-d061-4176-b950-12497aeba908-kube-api-access-wqmhm\") pod \"router-default-5444994796-lxr2h\" (UID: \"9ea9e718-d061-4176-b950-12497aeba908\") " pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.610335 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wptwp\" (UniqueName: \"kubernetes.io/projected/a9b11630-4b04-471c-9774-400f8211c770-kube-api-access-wptwp\") pod \"catalog-operator-68c6474976-rl8cq\" (UID: \"a9b11630-4b04-471c-9774-400f8211c770\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.627285 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.636796 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2xkg\" (UniqueName: \"kubernetes.io/projected/1e45d40a-645b-4c6f-b001-cb3c8beef2da-kube-api-access-v2xkg\") pod \"packageserver-d55dfcdfc-7dtgj\" (UID: \"1e45d40a-645b-4c6f-b001-cb3c8beef2da\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.675556 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677604 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bebed1e9-678c-4908-bc22-45325ace05cc-signing-cabundle\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677660 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-profile-collector-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677730 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhz4z\" (UniqueName: \"kubernetes.io/projected/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-kube-api-access-xhz4z\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677778 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f432448b-0071-4b69-ba3c-a05a9fc20199-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677804 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn7zg\" (UniqueName: \"kubernetes.io/projected/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-kube-api-access-bn7zg\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677836 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbf58\" (UniqueName: \"kubernetes.io/projected/c6c05d44-417f-4f52-a3ed-ee3eb183d452-kube-api-access-nbf58\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677867 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677891 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6ca9e3c6-dc50-4e2a-9331-120254254241-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677914 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-metrics-tls\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.677943 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vglzh\" (UniqueName: \"kubernetes.io/projected/bebed1e9-678c-4908-bc22-45325ace05cc-kube-api-access-vglzh\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678000 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-service-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678043 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-serving-cert\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678064 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678100 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678136 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678155 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678175 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9fe1c64-2433-4dae-b8b6-3bfa57277181-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678248 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678270 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678308 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2mdb\" (UniqueName: \"kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 
26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678330 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678350 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678372 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftrmx\" (UniqueName: \"kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678395 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678414 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678442 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8tbh\" (UniqueName: \"kubernetes.io/projected/e3590281-9d45-43f2-9b51-67aad451c66c-kube-api-access-x8tbh\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678461 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-audit-policies\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678482 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx5ts\" (UniqueName: \"kubernetes.io/projected/e4b6f08e-0222-4b7f-9eb7-2c6c37349efa-kube-api-access-kx5ts\") pod \"migrator-59844c95c7-6vv92\" (UID: \"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" Nov 26 22:23:34 crc kubenswrapper[4903]: 
I1126 22:23:34.678534 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6vkl\" (UniqueName: \"kubernetes.io/projected/6ca9e3c6-dc50-4e2a-9331-120254254241-kube-api-access-t6vkl\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678568 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9fe1c64-2433-4dae-b8b6-3bfa57277181-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678615 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-srv-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678640 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6ca9e3c6-dc50-4e2a-9331-120254254241-proxy-tls\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678664 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbgbp\" (UniqueName: \"kubernetes.io/projected/f432448b-0071-4b69-ba3c-a05a9fc20199-kube-api-access-jbgbp\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678753 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678788 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-images\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 
22:23:34.678812 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-client\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678845 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2j9n\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-kube-api-access-x2j9n\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678924 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678948 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-config\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.678992 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8bcv\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679016 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679039 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-proxy-tls\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679064 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679084 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679109 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljj5l\" (UniqueName: \"kubernetes.io/projected/2a44a668-4d2a-4e62-ae67-60314da59785-kube-api-access-ljj5l\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679136 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/e3590281-9d45-43f2-9b51-67aad451c66c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679160 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bebed1e9-678c-4908-bc22-45325ace05cc-signing-key\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679179 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-encryption-config\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: E1126 22:23:34.679239 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.179222958 +0000 UTC m=+143.869457888 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679266 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679290 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a44a668-4d2a-4e62-ae67-60314da59785-audit-dir\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-client\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679341 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7nkt\" (UniqueName: \"kubernetes.io/projected/e9e3e3eb-d7d9-4495-bd83-4107f00ae04a-kube-api-access-w7nkt\") pod \"downloads-7954f5f757-7qxf7\" (UID: \"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a\") " pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679365 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679400 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679420 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-serving-cert\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.679448 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4vld\" (UniqueName: \"kubernetes.io/projected/03bb801d-92a8-4a3a-a99b-9de804ba04ab-kube-api-access-w4vld\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.689555 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.697655 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.724157 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.733415 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.748378 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.767074 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.773336 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:34 crc kubenswrapper[4903]: W1126 22:23:34.774132 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ea9e718_d061_4176_b950_12497aeba908.slice/crio-3025979e7a0f04255f577815a16f485f6813b5e529611ebeb2ba236988e1225d WatchSource:0}: Error finding container 3025979e7a0f04255f577815a16f485f6813b5e529611ebeb2ba236988e1225d: Status 404 returned error can't find the container with id 3025979e7a0f04255f577815a16f485f6813b5e529611ebeb2ba236988e1225d Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.779169 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m"] Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.780516 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:34 crc kubenswrapper[4903]: E1126 22:23:34.780863 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.280705864 +0000 UTC m=+143.970940854 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781348 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmtrx\" (UniqueName: \"kubernetes.io/projected/bb685681-92a0-4b59-aa97-b02b9b4c73f9-kube-api-access-kmtrx\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781386 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8ms2\" (UniqueName: \"kubernetes.io/projected/0346d3c1-896e-4cae-889c-97e4b5acab20-kube-api-access-k8ms2\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781405 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c9d0560-2a3a-45c0-a1db-f90926ec348b-config\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781426 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781442 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781458 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9fe1c64-2433-4dae-b8b6-3bfa57277181-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781485 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6484l\" (UniqueName: \"kubernetes.io/projected/a3218153-38d5-4785-876a-7c3c73097f43-kube-api-access-6484l\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781524 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781539 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781554 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h76lf\" (UniqueName: \"kubernetes.io/projected/513976d0-8103-42f8-9091-538d9f27a0f2-kube-api-access-h76lf\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781568 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49xm7\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-kube-api-access-49xm7\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781587 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2mdb\" (UniqueName: \"kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781604 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a7aa8fd5-6118-4467-9f65-f98cc2f45979-cert\") pod \"ingress-canary-bs46d\" (UID: \"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781649 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/74c40195-e9f0-43c0-b896-66e7215889d8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781668 4903 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781685 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftrmx\" (UniqueName: \"kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781721 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781736 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781753 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781779 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8tbh\" (UniqueName: \"kubernetes.io/projected/e3590281-9d45-43f2-9b51-67aad451c66c-kube-api-access-x8tbh\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781797 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-audit-policies\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781812 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx5ts\" (UniqueName: \"kubernetes.io/projected/e4b6f08e-0222-4b7f-9eb7-2c6c37349efa-kube-api-access-kx5ts\") pod \"migrator-59844c95c7-6vv92\" (UID: \"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781829 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token\") pod 
\"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781846 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6vkl\" (UniqueName: \"kubernetes.io/projected/6ca9e3c6-dc50-4e2a-9331-120254254241-kube-api-access-t6vkl\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781862 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9fe1c64-2433-4dae-b8b6-3bfa57277181-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781879 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/513976d0-8103-42f8-9091-538d9f27a0f2-config-volume\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781896 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-srv-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781911 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ca82453-f87b-4664-bc10-0b6f09c50187-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781930 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6ca9e3c6-dc50-4e2a-9331-120254254241-proxy-tls\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781946 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbgbp\" (UniqueName: \"kubernetes.io/projected/f432448b-0071-4b69-ba3c-a05a9fc20199-kube-api-access-jbgbp\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781964 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" 
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781983 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-images\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.781998 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-client\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782012 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9c9d0560-2a3a-45c0-a1db-f90926ec348b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782028 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782045 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2j9n\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-kube-api-access-x2j9n\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782096 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-certs\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782113 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782129 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-config\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782188 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346d3c1-896e-4cae-889c-97e4b5acab20-config\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782214 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8bcv\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782231 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782248 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3218153-38d5-4785-876a-7c3c73097f43-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782264 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9024dcbb-8ec1-4cfc-853d-24f7974073d0-metrics-tls\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782279 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-proxy-tls\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782294 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2scc\" (UniqueName: \"kubernetes.io/projected/44cea8ea-c6de-417e-b313-9d8beda2f7c6-kube-api-access-t2scc\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782308 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c9d0560-2a3a-45c0-a1db-f90926ec348b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782326 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782341 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782359 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljj5l\" (UniqueName: \"kubernetes.io/projected/2a44a668-4d2a-4e62-ae67-60314da59785-kube-api-access-ljj5l\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782375 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqddh\" (UniqueName: \"kubernetes.io/projected/a7aa8fd5-6118-4467-9f65-f98cc2f45979-kube-api-access-qqddh\") pod \"ingress-canary-bs46d\" (UID: \"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782403 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/e3590281-9d45-43f2-9b51-67aad451c66c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782420 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bebed1e9-678c-4908-bc22-45325ace05cc-signing-key\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782434 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-encryption-config\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782448 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-socket-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782484 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782500 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a44a668-4d2a-4e62-ae67-60314da59785-audit-dir\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782514 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-client\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782531 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c40195-e9f0-43c0-b896-66e7215889d8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782549 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7nkt\" (UniqueName: \"kubernetes.io/projected/e9e3e3eb-d7d9-4495-bd83-4107f00ae04a-kube-api-access-w7nkt\") pod \"downloads-7954f5f757-7qxf7\" (UID: \"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a\") " pod="openshift-console/downloads-7954f5f757-7qxf7"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782566 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782583 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq7vb\" (UniqueName: \"kubernetes.io/projected/72c47664-999f-45b2-b047-184bdc7d8c58-kube-api-access-sq7vb\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782610 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782624 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/513976d0-8103-42f8-9091-538d9f27a0f2-metrics-tls\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782640 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-plugins-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782666 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-serving-cert\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782683 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-node-bootstrap-token\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782717 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4vld\" (UniqueName: \"kubernetes.io/projected/03bb801d-92a8-4a3a-a99b-9de804ba04ab-kube-api-access-w4vld\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782734 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-registration-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782748 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca82453-f87b-4664-bc10-0b6f09c50187-config\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782764 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bebed1e9-678c-4908-bc22-45325ace05cc-signing-cabundle\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782781 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-profile-collector-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782799 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhz4z\" (UniqueName: \"kubernetes.io/projected/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-kube-api-access-xhz4z\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782818 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/72c47664-999f-45b2-b047-184bdc7d8c58-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782835 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782855 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f432448b-0071-4b69-ba3c-a05a9fc20199-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn7zg\" (UniqueName: \"kubernetes.io/projected/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-kube-api-access-bn7zg\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782889 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbf58\" (UniqueName: \"kubernetes.io/projected/c6c05d44-417f-4f52-a3ed-ee3eb183d452-kube-api-access-nbf58\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782930 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.782947 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6ca9e3c6-dc50-4e2a-9331-120254254241-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8"
Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783331 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-metrics-tls\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26"
\"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783366 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vglzh\" (UniqueName: \"kubernetes.io/projected/bebed1e9-678c-4908-bc22-45325ace05cc-kube-api-access-vglzh\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783388 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-audit-policies\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783610 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca82453-f87b-4664-bc10-0b6f09c50187-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783650 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-csi-data-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783668 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74c40195-e9f0-43c0-b896-66e7215889d8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783707 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-service-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783725 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nj7p\" (UniqueName: \"kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783751 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-serving-cert\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783768 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783786 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3218153-38d5-4785-876a-7c3c73097f43-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783805 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-mountpoint-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783842 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0346d3c1-896e-4cae-889c-97e4b5acab20-serving-cert\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783861 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.783876 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9024dcbb-8ec1-4cfc-853d-24f7974073d0-trusted-ca\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.784830 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.784885 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9fe1c64-2433-4dae-b8b6-3bfa57277181-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.786304 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.787327 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.787975 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.788248 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.788343 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-images\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.788645 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.788776 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/bebed1e9-678c-4908-bc22-45325ace05cc-signing-cabundle\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.789328 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-srv-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.789551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: E1126 22:23:34.789948 4903 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.289930005 +0000 UTC m=+143.980165025 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.790043 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.790155 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.790885 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6ca9e3c6-dc50-4e2a-9331-120254254241-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.791048 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-service-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.791376 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-config\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.791396 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-client\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.791714 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 
crc kubenswrapper[4903]: I1126 22:23:34.791843 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.792572 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-ca\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.793456 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a44a668-4d2a-4e62-ae67-60314da59785-audit-dir\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.793987 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: W1126 22:23:34.794067 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod675115ca_1ad8_4ab0_a8fc_dea767d6abbc.slice/crio-f87071bdc6b34b0c32a3959426cf5ee53878e3d5e6daa6197ab4201ca6fd4592 WatchSource:0}: Error finding container f87071bdc6b34b0c32a3959426cf5ee53878e3d5e6daa6197ab4201ca6fd4592: Status 404 returned error can't find the container with id f87071bdc6b34b0c32a3959426cf5ee53878e3d5e6daa6197ab4201ca6fd4592 Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.794605 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.794909 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2a44a668-4d2a-4e62-ae67-60314da59785-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.795167 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/bebed1e9-678c-4908-bc22-45325ace05cc-signing-key\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.795391 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert\") pod \"console-f9d7485db-sfnff\" (UID: 
\"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.801671 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6ca9e3c6-dc50-4e2a-9331-120254254241-proxy-tls\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.805268 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c6c05d44-417f-4f52-a3ed-ee3eb183d452-profile-collector-cert\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.806211 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-serving-cert\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.806322 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/03bb801d-92a8-4a3a-a99b-9de804ba04ab-etcd-client\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.806601 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-proxy-tls\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.809174 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-encryption-config\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.812079 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2a44a668-4d2a-4e62-ae67-60314da59785-serving-cert\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.813478 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/e3590281-9d45-43f2-9b51-67aad451c66c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.814027 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8tbh\" (UniqueName: 
\"kubernetes.io/projected/e3590281-9d45-43f2-9b51-67aad451c66c-kube-api-access-x8tbh\") pod \"package-server-manager-789f6589d5-27m7h\" (UID: \"e3590281-9d45-43f2-9b51-67aad451c66c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.814491 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.817669 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d9fe1c64-2433-4dae-b8b6-3bfa57277181-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.818735 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-metrics-tls\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.821906 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f432448b-0071-4b69-ba3c-a05a9fc20199-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:34 crc kubenswrapper[4903]: W1126 22:23:34.823824 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4115a44a_84ae_4629_970e_16c25d4f59e1.slice/crio-934e9032373dbf4bcd9bd32479d02b2b8876329cfab771edc973dd09e99c8a59 WatchSource:0}: Error finding container 934e9032373dbf4bcd9bd32479d02b2b8876329cfab771edc973dd09e99c8a59: Status 404 returned error can't find the container with id 934e9032373dbf4bcd9bd32479d02b2b8876329cfab771edc973dd09e99c8a59 Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.826972 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.831364 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" event={"ID":"675115ca-1ad8-4ab0-a8fc-dea767d6abbc","Type":"ContainerStarted","Data":"f87071bdc6b34b0c32a3959426cf5ee53878e3d5e6daa6197ab4201ca6fd4592"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.832687 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8bcv\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.837991 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" event={"ID":"27420cfc-cc8f-4482-9206-706ab7bf9430","Type":"ContainerStarted","Data":"824bd7e6df0f091082f08ba8eb97bbd6b70ba62abae873e023ea70f8d91c4cbb"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.838022 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" event={"ID":"27420cfc-cc8f-4482-9206-706ab7bf9430","Type":"ContainerStarted","Data":"97839c13b3d96941c5db1e1cf01e7f560c917f07d6ee67f70f496c9b979f140b"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.846435 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-lxr2h" event={"ID":"9ea9e718-d061-4176-b950-12497aeba908","Type":"ContainerStarted","Data":"3025979e7a0f04255f577815a16f485f6813b5e529611ebeb2ba236988e1225d"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.850522 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftrmx\" (UniqueName: \"kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx\") pod \"collect-profiles-29403255-kmtfb\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.853209 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" event={"ID":"fcc90cdd-595c-4d40-908e-12b1586dfd43","Type":"ContainerStarted","Data":"0c7bf9ed2afa4bfc6a62a925a3f6a9aefe2fa29e7da4c94862fd9551f0ddafb5"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.862313 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" event={"ID":"105dda6e-573f-49a6-a9a3-d29dd954ee09","Type":"ContainerStarted","Data":"ab80b4aef0b8ca3a52676696d1b87279ef1d9709ffc27affe194cf3590131e49"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.862572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" event={"ID":"105dda6e-573f-49a6-a9a3-d29dd954ee09","Type":"ContainerStarted","Data":"1127e2892cd1e3f798077c2e50c684caf87836e3ce59b0042d2028e50bbd8f3e"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.864572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" 
event={"ID":"4115a44a-84ae-4629-970e-16c25d4f59e1","Type":"ContainerStarted","Data":"934e9032373dbf4bcd9bd32479d02b2b8876329cfab771edc973dd09e99c8a59"} Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.875494 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx5ts\" (UniqueName: \"kubernetes.io/projected/e4b6f08e-0222-4b7f-9eb7-2c6c37349efa-kube-api-access-kx5ts\") pod \"migrator-59844c95c7-6vv92\" (UID: \"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884471 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884663 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq7vb\" (UniqueName: \"kubernetes.io/projected/72c47664-999f-45b2-b047-184bdc7d8c58-kube-api-access-sq7vb\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884683 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/513976d0-8103-42f8-9091-538d9f27a0f2-metrics-tls\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884712 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-plugins-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884740 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-node-bootstrap-token\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884767 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-registration-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884782 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca82453-f87b-4664-bc10-0b6f09c50187-config\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884833 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/72c47664-999f-45b2-b047-184bdc7d8c58-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca82453-f87b-4664-bc10-0b6f09c50187-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884893 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-csi-data-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884907 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74c40195-e9f0-43c0-b896-66e7215889d8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884929 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nj7p\" (UniqueName: \"kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884950 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-mountpoint-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884965 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3218153-38d5-4785-876a-7c3c73097f43-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884980 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0346d3c1-896e-4cae-889c-97e4b5acab20-serving-cert\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.884996 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9024dcbb-8ec1-4cfc-853d-24f7974073d0-trusted-ca\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885011 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c9d0560-2a3a-45c0-a1db-f90926ec348b-config\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885026 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmtrx\" (UniqueName: \"kubernetes.io/projected/bb685681-92a0-4b59-aa97-b02b9b4c73f9-kube-api-access-kmtrx\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885040 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8ms2\" (UniqueName: \"kubernetes.io/projected/0346d3c1-896e-4cae-889c-97e4b5acab20-kube-api-access-k8ms2\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885055 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6484l\" (UniqueName: \"kubernetes.io/projected/a3218153-38d5-4785-876a-7c3c73097f43-kube-api-access-6484l\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h76lf\" (UniqueName: \"kubernetes.io/projected/513976d0-8103-42f8-9091-538d9f27a0f2-kube-api-access-h76lf\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885095 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49xm7\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-kube-api-access-49xm7\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885117 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a7aa8fd5-6118-4467-9f65-f98cc2f45979-cert\") pod \"ingress-canary-bs46d\" (UID: 
\"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885131 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/74c40195-e9f0-43c0-b896-66e7215889d8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885145 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885171 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/513976d0-8103-42f8-9091-538d9f27a0f2-config-volume\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885185 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ca82453-f87b-4664-bc10-0b6f09c50187-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885222 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9c9d0560-2a3a-45c0-a1db-f90926ec348b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885240 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885260 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-certs\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885277 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346d3c1-896e-4cae-889c-97e4b5acab20-config\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885301 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3218153-38d5-4785-876a-7c3c73097f43-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885316 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9024dcbb-8ec1-4cfc-853d-24f7974073d0-metrics-tls\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885331 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c9d0560-2a3a-45c0-a1db-f90926ec348b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885345 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2scc\" (UniqueName: \"kubernetes.io/projected/44cea8ea-c6de-417e-b313-9d8beda2f7c6-kube-api-access-t2scc\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885368 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqddh\" (UniqueName: \"kubernetes.io/projected/a7aa8fd5-6118-4467-9f65-f98cc2f45979-kube-api-access-qqddh\") pod \"ingress-canary-bs46d\" (UID: \"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885386 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-socket-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.885403 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c40195-e9f0-43c0-b896-66e7215889d8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.886025 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c40195-e9f0-43c0-b896-66e7215889d8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: E1126 22:23:34.886090 4903 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.386078865 +0000 UTC m=+144.076313775 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.886839 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-plugins-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.891507 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/513976d0-8103-42f8-9091-538d9f27a0f2-config-volume\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.891637 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-mountpoint-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.892015 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.892119 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3218153-38d5-4785-876a-7c3c73097f43-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.892442 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a7aa8fd5-6118-4467-9f65-f98cc2f45979-cert\") pod \"ingress-canary-bs46d\" (UID: \"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.892505 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-446xv"] Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.894162 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-socket-dir\") pod \"csi-hostpathplugin-7gs98\" 
(UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.894407 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-registration-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895230 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c9d0560-2a3a-45c0-a1db-f90926ec348b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895252 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca82453-f87b-4664-bc10-0b6f09c50187-config\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895291 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/bb685681-92a0-4b59-aa97-b02b9b4c73f9-csi-data-dir\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895654 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/513976d0-8103-42f8-9091-538d9f27a0f2-metrics-tls\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895811 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0346d3c1-896e-4cae-889c-97e4b5acab20-config\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.895901 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c9d0560-2a3a-45c0-a1db-f90926ec348b-config\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.896394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-certs\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.897828 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token\") 
pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.898468 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3218153-38d5-4785-876a-7c3c73097f43-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.901438 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/74c40195-e9f0-43c0-b896-66e7215889d8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.904841 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9024dcbb-8ec1-4cfc-853d-24f7974073d0-trusted-ca\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.906380 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0346d3c1-896e-4cae-889c-97e4b5acab20-serving-cert\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.906609 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/72c47664-999f-45b2-b047-184bdc7d8c58-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.911108 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.912541 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/44cea8ea-c6de-417e-b313-9d8beda2f7c6-node-bootstrap-token\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.916312 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9024dcbb-8ec1-4cfc-853d-24f7974073d0-metrics-tls\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.916587 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6vkl\" (UniqueName: \"kubernetes.io/projected/6ca9e3c6-dc50-4e2a-9331-120254254241-kube-api-access-t6vkl\") pod \"machine-config-controller-84d6567774-4fzk8\" (UID: \"6ca9e3c6-dc50-4e2a-9331-120254254241\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.918063 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ca82453-f87b-4664-bc10-0b6f09c50187-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.919384 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.950117 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.962909 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vglzh\" (UniqueName: \"kubernetes.io/projected/bebed1e9-678c-4908-bc22-45325ace05cc-kube-api-access-vglzh\") pod \"service-ca-9c57cc56f-g8r8c\" (UID: \"bebed1e9-678c-4908-bc22-45325ace05cc\") " pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.967551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbgbp\" (UniqueName: \"kubernetes.io/projected/f432448b-0071-4b69-ba3c-a05a9fc20199-kube-api-access-jbgbp\") pod \"multus-admission-controller-857f4d67dd-5572p\" (UID: \"f432448b-0071-4b69-ba3c-a05a9fc20199\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.986549 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:34 crc kubenswrapper[4903]: E1126 22:23:34.986944 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.486931773 +0000 UTC m=+144.177166683 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:34 crc kubenswrapper[4903]: I1126 22:23:34.993527 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn7zg\" (UniqueName: \"kubernetes.io/projected/2ac5c6d2-c134-4f7f-bfc0-16358351b0c7-kube-api-access-bn7zg\") pod \"dns-operator-744455d44c-btk26\" (UID: \"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7\") " pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.004145 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.007846 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljj5l\" (UniqueName: \"kubernetes.io/projected/2a44a668-4d2a-4e62-ae67-60314da59785-kube-api-access-ljj5l\") pod \"apiserver-7bbb656c7d-lrvrl\" (UID: \"2a44a668-4d2a-4e62-ae67-60314da59785\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.012056 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.030596 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbf58\" (UniqueName: \"kubernetes.io/projected/c6c05d44-417f-4f52-a3ed-ee3eb183d452-kube-api-access-nbf58\") pod \"olm-operator-6b444d44fb-fjxzh\" (UID: \"c6c05d44-417f-4f52-a3ed-ee3eb183d452\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.034669 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-btw2l"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.048974 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2j9n\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-kube-api-access-x2j9n\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.072141 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhz4z\" (UniqueName: \"kubernetes.io/projected/e56e61b8-bfdf-4800-9bab-90c0d4bd87bc-kube-api-access-xhz4z\") pod \"machine-config-operator-74547568cd-qtskz\" (UID: \"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.077351 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.083680 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.086531 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7nkt\" (UniqueName: \"kubernetes.io/projected/e9e3e3eb-d7d9-4495-bd83-4107f00ae04a-kube-api-access-w7nkt\") pod \"downloads-7954f5f757-7qxf7\" (UID: \"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a\") " pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.087231 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.092037 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.592003967 +0000 UTC m=+144.282238917 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.092221 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.103037 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.111540 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.115966 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2mdb\" (UniqueName: \"kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb\") pod \"console-f9d7485db-sfnff\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.127761 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9fe1c64-2433-4dae-b8b6-3bfa57277181-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-bzf7d\" (UID: \"d9fe1c64-2433-4dae-b8b6-3bfa57277181\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:35 crc kubenswrapper[4903]: W1126 22:23:35.142859 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9b11630_4b04_471c_9774_400f8211c770.slice/crio-145a0b8a6eefab62a9743211c882c63fd862a1dd46462034e96e1e0f87d2b419 WatchSource:0}: Error finding container 145a0b8a6eefab62a9743211c882c63fd862a1dd46462034e96e1e0f87d2b419: Status 404 returned error can't find the container with id 145a0b8a6eefab62a9743211c882c63fd862a1dd46462034e96e1e0f87d2b419 Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.152178 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4vld\" (UniqueName: \"kubernetes.io/projected/03bb801d-92a8-4a3a-a99b-9de804ba04ab-kube-api-access-w4vld\") pod \"etcd-operator-b45778765-fgg79\" (UID: \"03bb801d-92a8-4a3a-a99b-9de804ba04ab\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.180804 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.181126 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq7vb\" (UniqueName: \"kubernetes.io/projected/72c47664-999f-45b2-b047-184bdc7d8c58-kube-api-access-sq7vb\") pod \"control-plane-machine-set-operator-78cbb6b69f-zb6l2\" (UID: \"72c47664-999f-45b2-b047-184bdc7d8c58\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.189917 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.190601 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.690587314 +0000 UTC m=+144.380822224 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.193759 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8ms2\" (UniqueName: \"kubernetes.io/projected/0346d3c1-896e-4cae-889c-97e4b5acab20-kube-api-access-k8ms2\") pod \"service-ca-operator-777779d784-5qhmj\" (UID: \"0346d3c1-896e-4cae-889c-97e4b5acab20\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.195583 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.205872 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.206111 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.209172 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.222034 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6484l\" (UniqueName: \"kubernetes.io/projected/a3218153-38d5-4785-876a-7c3c73097f43-kube-api-access-6484l\") pod \"kube-storage-version-migrator-operator-b67b599dd-pv6pw\" (UID: \"a3218153-38d5-4785-876a-7c3c73097f43\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.235805 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h76lf\" (UniqueName: \"kubernetes.io/projected/513976d0-8103-42f8-9091-538d9f27a0f2-kube-api-access-h76lf\") pod \"dns-default-dbzzj\" (UID: \"513976d0-8103-42f8-9091-538d9f27a0f2\") " pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.241191 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.244514 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.259571 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.260085 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49xm7\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-kube-api-access-49xm7\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.282142 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ca82453-f87b-4664-bc10-0b6f09c50187-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-gqd92\" (UID: \"2ca82453-f87b-4664-bc10-0b6f09c50187\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.282287 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.293018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9c9d0560-2a3a-45c0-a1db-f90926ec348b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-z9bs6\" (UID: \"9c9d0560-2a3a-45c0-a1db-f90926ec348b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.293242 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.293566 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.793552069 +0000 UTC m=+144.483786979 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.313057 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.316933 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.319521 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-tjpcm"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.337927 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nj7p\" (UniqueName: \"kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p\") pod \"marketplace-operator-79b997595-94zhz\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.342471 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/74c40195-e9f0-43c0-b896-66e7215889d8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mclwt\" (UID: \"74c40195-e9f0-43c0-b896-66e7215889d8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.353036 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9024dcbb-8ec1-4cfc-853d-24f7974073d0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-4q25l\" (UID: \"9024dcbb-8ec1-4cfc-853d-24f7974073d0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.369490 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqddh\" (UniqueName: \"kubernetes.io/projected/a7aa8fd5-6118-4467-9f65-f98cc2f45979-kube-api-access-qqddh\") pod \"ingress-canary-bs46d\" (UID: \"a7aa8fd5-6118-4467-9f65-f98cc2f45979\") " pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.393262 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2scc\" (UniqueName: \"kubernetes.io/projected/44cea8ea-c6de-417e-b313-9d8beda2f7c6-kube-api-access-t2scc\") pod \"machine-config-server-rxr2l\" (UID: \"44cea8ea-c6de-417e-b313-9d8beda2f7c6\") " pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.395098 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.395617 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:35.895580919 +0000 UTC m=+144.585815819 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.409570 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmtrx\" (UniqueName: \"kubernetes.io/projected/bb685681-92a0-4b59-aa97-b02b9b4c73f9-kube-api-access-kmtrx\") pod \"csi-hostpathplugin-7gs98\" (UID: \"bb685681-92a0-4b59-aa97-b02b9b4c73f9\") " pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.412133 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.424423 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.432747 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.449925 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.461049 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.467080 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.474426 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.480138 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.490534 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.495982 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.496972 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 22:23:35.996944151 +0000 UTC m=+144.687179071 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.498583 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.512790 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rxr2l" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.539398 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.547557 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-bs46d" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.563809 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.591727 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.598645 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.599073 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.099061254 +0000 UTC m=+144.789296154 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.620313 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.645008 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7qxf7"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.659165 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-btk26"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.659559 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-5572p"] Nov 26 22:23:35 crc kubenswrapper[4903]: W1126 22:23:35.685643 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6141f47a_9c9b_4ef8_85c9_518f947bff57.slice/crio-9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6 WatchSource:0}: Error finding container 9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6: Status 404 returned error can't find the container with id 9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6 Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.706050 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.706451 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.206437111 +0000 UTC m=+144.896672011 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.737480 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.739900 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.741153 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.747165 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:23:35 crc kubenswrapper[4903]: W1126 22:23:35.785747 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf432448b_0071_4b69_ba3c_a05a9fc20199.slice/crio-42d2846c81b0434dd9c3060589a6982588d9425498f1ca73d4fcd2e1d0e168ac WatchSource:0}: Error finding container 42d2846c81b0434dd9c3060589a6982588d9425498f1ca73d4fcd2e1d0e168ac: Status 404 returned error can't find the container with id 42d2846c81b0434dd9c3060589a6982588d9425498f1ca73d4fcd2e1d0e168ac Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.807835 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.815068 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.315053801 +0000 UTC m=+145.005288711 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.829293 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.877217 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.878147 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" event={"ID":"27420cfc-cc8f-4482-9206-706ab7bf9430","Type":"ContainerStarted","Data":"c1ca3e9fc7a4cd6940bb822312b28ab1356cd2687b51bbaa7ae2608de2234a58"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.905951 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" event={"ID":"979e7792-1bc6-482b-a63b-fd6d1227970a","Type":"ContainerStarted","Data":"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.906334 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" event={"ID":"979e7792-1bc6-482b-a63b-fd6d1227970a","Type":"ContainerStarted","Data":"80005c51987ed8190d0c8392294ea267d59ebacea635fc7a8afa489a4d943f05"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.907530 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.912735 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:35 crc kubenswrapper[4903]: E1126 22:23:35.913779 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.413762991 +0000 UTC m=+145.103997901 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.914950 4903 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-7vvhp container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.915001 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.921059 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-446xv" event={"ID":"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e","Type":"ContainerStarted","Data":"11f72691c5f11b02cdaa26521efad693f749788cc5ce3eaa69236484d47a8af5"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.921096 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-446xv" event={"ID":"edcd7b52-c1d0-4a27-8550-d4b7885eaf5e","Type":"ContainerStarted","Data":"9ab0f09e60e5d6abecbd61c7a89416720debc174536068d8178bab5e4c7d76e9"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.921493 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.926556 4903 patch_prober.go:28] interesting pod/console-operator-58897d9998-446xv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.926610 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-446xv" podUID="edcd7b52-c1d0-4a27-8550-d4b7885eaf5e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 26 22:23:35 crc kubenswrapper[4903]: W1126 22:23:35.958833 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ca82453_f87b_4664_bc10_0b6f09c50187.slice/crio-994734a4db711d8abd1e0a25286fd0a40874a7476e07d0dcb3f556a68dff3044 WatchSource:0}: Error finding container 994734a4db711d8abd1e0a25286fd0a40874a7476e07d0dcb3f556a68dff3044: Status 404 returned error can't find the container with id 994734a4db711d8abd1e0a25286fd0a40874a7476e07d0dcb3f556a68dff3044 Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.968415 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-7qxf7" event={"ID":"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a","Type":"ContainerStarted","Data":"e4dfddc27e1f415780dcec584ae2b143d27cbfdc28afa3c389d8629ed8ecd0c5"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.971650 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" event={"ID":"a9b11630-4b04-471c-9774-400f8211c770","Type":"ContainerStarted","Data":"29305571f9d00278171edb0962b1b76d737b28fdea30ca9f450df484f70b6c8b"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.971694 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" event={"ID":"a9b11630-4b04-471c-9774-400f8211c770","Type":"ContainerStarted","Data":"145a0b8a6eefab62a9743211c882c63fd862a1dd46462034e96e1e0f87d2b419"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.973095 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.974035 4903 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-rl8cq container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.974072 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" podUID="a9b11630-4b04-471c-9774-400f8211c770" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.978778 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" event={"ID":"4115a44a-84ae-4629-970e-16c25d4f59e1","Type":"ContainerStarted","Data":"072340cff0e46256f7f1a85585bb614b23da08db3bbc94f308b332e1291a82bd"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.979107 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.982921 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgg79"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.985822 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-g8r8c"] Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.986359 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" event={"ID":"6141f47a-9c9b-4ef8-85c9-518f947bff57","Type":"ContainerStarted","Data":"9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.987622 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" event={"ID":"675115ca-1ad8-4ab0-a8fc-dea767d6abbc","Type":"ContainerStarted","Data":"5b60fd56bbba403241deea23aa1ca58276c9472b96a5133c03b44963477606e7"} Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 
22:23:35.991175 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" event={"ID":"15015a4f-e3d3-4042-bf77-70c01c7c05b6","Type":"ContainerStarted","Data":"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.991484 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" event={"ID":"15015a4f-e3d3-4042-bf77-70c01c7c05b6","Type":"ContainerStarted","Data":"005cc6ce29e0ec3a85b99fc9ff657145754a64646bcc609b4cd8e0e24867c194"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.991504 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8"
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.993226 4903 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-d8tw8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.993265 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.993492 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" event={"ID":"cea77f03-3be2-41bd-be01-cf09fb878b3d","Type":"ContainerStarted","Data":"aa38a62fdf1f507b5c20ea3b5146527f0e6a500a53702e16f6e6888a8fd114ef"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.995470 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" event={"ID":"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7","Type":"ContainerStarted","Data":"ddb182574cce03bf94cea5d6b7d80fcde5b09cf4184f9d19e643977aa91fb9d0"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.997198 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" event={"ID":"c9ab96ae-576b-4a86-b4fb-2b059759fb1e","Type":"ContainerStarted","Data":"5c72bc9663f40222247487b9450b4e1eca2e70064f18e30b014d880e7976bc1d"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.997232 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" event={"ID":"c9ab96ae-576b-4a86-b4fb-2b059759fb1e","Type":"ContainerStarted","Data":"5c9942e9e03658cc6709d9f2506f601fc19ee4eadb14241334a26d98987ab1ec"}
Nov 26 22:23:35 crc kubenswrapper[4903]: I1126 22:23:35.999820 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" event={"ID":"2a44a668-4d2a-4e62-ae67-60314da59785","Type":"ContainerStarted","Data":"e11162e1ce02a97ce5f3c4c8c77588d0d06c2a4d96fabd1debe78ef5fb574f5c"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.000572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" event={"ID":"6ca9e3c6-dc50-4e2a-9331-120254254241","Type":"ContainerStarted","Data":"5d10d2079b1ee8177bd70b424602ad7be8a9151d44246d00a4ed431c1c452c60"}
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.005677 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbebed1e9_678c_4908_bc22_45325ace05cc.slice/crio-40271c94be6f30dde20f9781842e463e04684fd3b23837d7c6a88c71fd176780 WatchSource:0}: Error finding container 40271c94be6f30dde20f9781842e463e04684fd3b23837d7c6a88c71fd176780: Status 404 returned error can't find the container with id 40271c94be6f30dde20f9781842e463e04684fd3b23837d7c6a88c71fd176780
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.006432 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" event={"ID":"e3590281-9d45-43f2-9b51-67aad451c66c","Type":"ContainerStarted","Data":"64957b9d2e42a994c66bcb0fbc2dd1660765ab220b385295dd99466ccb1d8f7d"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.013804 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.015829 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.515814742 +0000 UTC m=+145.206049762 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.046667 4903 generic.go:334] "Generic (PLEG): container finished" podID="6698acb2-1ed3-44af-ac70-8c11bdca5c6e" containerID="b8622e1e2662e6c0d96148eb12d17a0cca7ec1e19682c8d55571d8f4ba88916d" exitCode=0
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.059719 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" event={"ID":"6698acb2-1ed3-44af-ac70-8c11bdca5c6e","Type":"ContainerDied","Data":"b8622e1e2662e6c0d96148eb12d17a0cca7ec1e19682c8d55571d8f4ba88916d"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.059759 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" event={"ID":"6698acb2-1ed3-44af-ac70-8c11bdca5c6e","Type":"ContainerStarted","Data":"edaf9486d01e83ab720d236e7a2bf7f5e03b18547b4ab242d234f96b462b11f7"}
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.064406 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03bb801d_92a8_4a3a_a99b_9de804ba04ab.slice/crio-f601e8367f7381161379a32122d83384fd10a3039da0afa60620a089325320ad WatchSource:0}: Error finding container f601e8367f7381161379a32122d83384fd10a3039da0afa60620a089325320ad: Status 404 returned error can't find the container with id f601e8367f7381161379a32122d83384fd10a3039da0afa60620a089325320ad
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.065879 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3218153_38d5_4785_876a_7c3c73097f43.slice/crio-06efaab6584d8623d4da315e1eebdaa15a3e0af0231a210b25d8cc04d3e17358 WatchSource:0}: Error finding container 06efaab6584d8623d4da315e1eebdaa15a3e0af0231a210b25d8cc04d3e17358: Status 404 returned error can't find the container with id 06efaab6584d8623d4da315e1eebdaa15a3e0af0231a210b25d8cc04d3e17358
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.067247 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sfnff" event={"ID":"1293736c-513c-490e-afb1-97df72e3e51c","Type":"ContainerStarted","Data":"93994046d4391371554c217ef1cfa80caec2a5ba801ddbe3bf292392733b4bcd"}
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.069188 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44cea8ea_c6de_417e_b313_9d8beda2f7c6.slice/crio-584104a9b328c7ea4e4e34bdb9d5e3fdce8d8cdf9cad77b5d11154b77e83a646 WatchSource:0}: Error finding container 584104a9b328c7ea4e4e34bdb9d5e3fdce8d8cdf9cad77b5d11154b77e83a646: Status 404 returned error can't find the container with id 584104a9b328c7ea4e4e34bdb9d5e3fdce8d8cdf9cad77b5d11154b77e83a646
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.084370 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" event={"ID":"1e45d40a-645b-4c6f-b001-cb3c8beef2da","Type":"ContainerStarted","Data":"80be51c5333d7a702d5bf4f21462a476dfabcf34a3edc385d76034d89ed0cf85"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.084420 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" event={"ID":"1e45d40a-645b-4c6f-b001-cb3c8beef2da","Type":"ContainerStarted","Data":"84bb6cd1f50089391b6a6d2cb11ba3f75e70018a9aeb250d5c2af21b254b2b95"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.085065 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.086752 4903 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7dtgj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" start-of-body=
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.086791 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" podUID="1e45d40a-645b-4c6f-b001-cb3c8beef2da" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.092286 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" event={"ID":"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa","Type":"ContainerStarted","Data":"24f40c07ceacb8efc06bd146a62f16fa52d14a618ed0ebc95860708406e28841"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.095335 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" event={"ID":"c6c05d44-417f-4f52-a3ed-ee3eb183d452","Type":"ContainerStarted","Data":"990f641540e553a9b3b57b10461da12f30f7dcf7e233d68cfcb01ab9045676d9"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.097572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" event={"ID":"f432448b-0071-4b69-ba3c-a05a9fc20199","Type":"ContainerStarted","Data":"42d2846c81b0434dd9c3060589a6982588d9425498f1ca73d4fcd2e1d0e168ac"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.102455 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" event={"ID":"849a13af-5a8d-43b2-b8d8-c07cd8bfe399","Type":"ContainerStarted","Data":"a787bb08791e5a8628b71e2df744beb23114f847a4a11f14e622a9fbe5eeb247"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.107591 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-lxr2h" event={"ID":"9ea9e718-d061-4176-b950-12497aeba908","Type":"ContainerStarted","Data":"e51b1c9b44e6fd0260ab7f32716882251eaaf6e3ef8d95fbc1bcf04b2da23de9"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.114590 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.114663 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.614647545 +0000 UTC m=+145.304882455 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.114776 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.115053 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.615037895 +0000 UTC m=+145.305272805 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.119648 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" event={"ID":"fcc90cdd-595c-4d40-908e-12b1586dfd43","Type":"ContainerStarted","Data":"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.125993 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" event={"ID":"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc","Type":"ContainerStarted","Data":"aba8fcf3b7f0f85a17b131d515fc4fbda9c2f14c73037a0eb6c59f47b9d135cf"}
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.216553 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.216730 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.716715446 +0000 UTC m=+145.406950356 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.217029 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.221244 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.72123427 +0000 UTC m=+145.411469180 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.295795 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.309593 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.309764 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.326122 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.326390 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.826375695 +0000 UTC m=+145.516610605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.393595 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9024dcbb_8ec1_4cfc_853d_24f7974073d0.slice/crio-4d5190a47ccf97fd5c39b5fb52bb79b0db44a90e8d44200ac0181d285f9ae2e6 WatchSource:0}: Error finding container 4d5190a47ccf97fd5c39b5fb52bb79b0db44a90e8d44200ac0181d285f9ae2e6: Status 404 returned error can't find the container with id 4d5190a47ccf97fd5c39b5fb52bb79b0db44a90e8d44200ac0181d285f9ae2e6
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.399903 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72c47664_999f_45b2_b047_184bdc7d8c58.slice/crio-e55cf4b962d9edb0302c6ad4ba07db9ef017d53cac5808862b60747f960f385f WatchSource:0}: Error finding container e55cf4b962d9edb0302c6ad4ba07db9ef017d53cac5808862b60747f960f385f: Status 404 returned error can't find the container with id e55cf4b962d9edb0302c6ad4ba07db9ef017d53cac5808862b60747f960f385f
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.427220 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.427782 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:36.927771578 +0000 UTC m=+145.618006488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.429044 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-446xv" podStartSLOduration=124.429025782 podStartE2EDuration="2m4.429025782s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.428138178 +0000 UTC m=+145.118373088" watchObservedRunningTime="2025-11-26 22:23:36.429025782 +0000 UTC m=+145.119260702"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.434991 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.445417 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.468052 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-lxr2h" podStartSLOduration=123.468035169 podStartE2EDuration="2m3.468035169s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.46661763 +0000 UTC m=+145.156852560" watchObservedRunningTime="2025-11-26 22:23:36.468035169 +0000 UTC m=+145.158270079"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.533305 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.533651 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.033637453 +0000 UTC m=+145.723872363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.543440 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" podStartSLOduration=123.543421041 podStartE2EDuration="2m3.543421041s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.508504336 +0000 UTC m=+145.198739266" watchObservedRunningTime="2025-11-26 22:23:36.543421041 +0000 UTC m=+145.233655961"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.573890 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-7gs98"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.575272 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"]
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.623182 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e6e49c1_f210_4ee8_af41_9e43123ae910.slice/crio-e916306917abffea8b6ade0b700c82732bdd349be72b442bb22acafa08e0a223 WatchSource:0}: Error finding container e916306917abffea8b6ade0b700c82732bdd349be72b442bb22acafa08e0a223: Status 404 returned error can't find the container with id e916306917abffea8b6ade0b700c82732bdd349be72b442bb22acafa08e0a223
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.624898 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb685681_92a0_4b59_aa97_b02b9b4c73f9.slice/crio-8491e140fac6da7ec634430fdda220d6b44b7f0d4055940c6f1be8ad5a121d99 WatchSource:0}: Error finding container 8491e140fac6da7ec634430fdda220d6b44b7f0d4055940c6f1be8ad5a121d99: Status 404 returned error can't find the container with id 8491e140fac6da7ec634430fdda220d6b44b7f0d4055940c6f1be8ad5a121d99
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.641794 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.642421 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.142410918 +0000 UTC m=+145.832645828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.649190 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" podStartSLOduration=124.649166032 podStartE2EDuration="2m4.649166032s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.641156994 +0000 UTC m=+145.331391904" watchObservedRunningTime="2025-11-26 22:23:36.649166032 +0000 UTC m=+145.339400932"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.651946 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dbzzj"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.720932 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-bs46d"]
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.726771 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-lxr2h"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.733546 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 22:23:36 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld
Nov 26 22:23:36 crc kubenswrapper[4903]: [+]process-running ok
Nov 26 22:23:36 crc kubenswrapper[4903]: healthz check failed
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.733583 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.743233 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.743546 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.243531504 +0000 UTC m=+145.933766414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: W1126 22:23:36.760753 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7aa8fd5_6118_4467_9f65_f98cc2f45979.slice/crio-93b5418721332ae08cdeeb12e71763a23384788297e9ba949e5a1fb3e828c395 WatchSource:0}: Error finding container 93b5418721332ae08cdeeb12e71763a23384788297e9ba949e5a1fb3e828c395: Status 404 returned error can't find the container with id 93b5418721332ae08cdeeb12e71763a23384788297e9ba949e5a1fb3e828c395
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.787642 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-x242l" podStartSLOduration=123.787626919 podStartE2EDuration="2m3.787626919s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.786398115 +0000 UTC m=+145.476633025" watchObservedRunningTime="2025-11-26 22:23:36.787626919 +0000 UTC m=+145.477861829"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.844491 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.845662 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.345649977 +0000 UTC m=+146.035884887 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.909233 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-4tr72" podStartSLOduration=124.909216695 podStartE2EDuration="2m4.909216695s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.867305488 +0000 UTC m=+145.557540398" watchObservedRunningTime="2025-11-26 22:23:36.909216695 +0000 UTC m=+145.599451605"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.909672 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-jnj9m" podStartSLOduration=124.909666387 podStartE2EDuration="2m4.909666387s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.904230339 +0000 UTC m=+145.594465249" watchObservedRunningTime="2025-11-26 22:23:36.909666387 +0000 UTC m=+145.599901297"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.945083 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" podStartSLOduration=123.945064585 podStartE2EDuration="2m3.945064585s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.942630939 +0000 UTC m=+145.632865849" watchObservedRunningTime="2025-11-26 22:23:36.945064585 +0000 UTC m=+145.635299495"
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.945339 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:36 crc kubenswrapper[4903]: E1126 22:23:36.946218 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.446194647 +0000 UTC m=+146.136429557 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:36 crc kubenswrapper[4903]: I1126 22:23:36.986974 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" podStartSLOduration=123.986950171 podStartE2EDuration="2m3.986950171s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:36.983832955 +0000 UTC m=+145.674067865" watchObservedRunningTime="2025-11-26 22:23:36.986950171 +0000 UTC m=+145.677185081"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.048027 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.048368 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.548356539 +0000 UTC m=+146.238591449 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.124066 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq" podStartSLOduration=124.12405087 podStartE2EDuration="2m4.12405087s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.065659023 +0000 UTC m=+145.755893923" watchObservedRunningTime="2025-11-26 22:23:37.12405087 +0000 UTC m=+145.814285780"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.141437 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" event={"ID":"03bb801d-92a8-4a3a-a99b-9de804ba04ab","Type":"ContainerStarted","Data":"f601e8367f7381161379a32122d83384fd10a3039da0afa60620a089325320ad"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.147125 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-bs46d" event={"ID":"a7aa8fd5-6118-4467-9f65-f98cc2f45979","Type":"ContainerStarted","Data":"93b5418721332ae08cdeeb12e71763a23384788297e9ba949e5a1fb3e828c395"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.151477 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" event={"ID":"2e6e49c1-f210-4ee8-af41-9e43123ae910","Type":"ContainerStarted","Data":"e916306917abffea8b6ade0b700c82732bdd349be72b442bb22acafa08e0a223"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.151501 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.151918 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.651905681 +0000 UTC m=+146.342140591 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.152389 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2s7q" podStartSLOduration=124.152368765 podStartE2EDuration="2m4.152368765s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.122814056 +0000 UTC m=+145.813048966" watchObservedRunningTime="2025-11-26 22:23:37.152368765 +0000 UTC m=+145.842603675"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.153497 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" event={"ID":"f432448b-0071-4b69-ba3c-a05a9fc20199","Type":"ContainerStarted","Data":"501ef1e928a7ce06322e6f64849f771faaa9fc4f36280877ccbc8ce38095817c"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.155751 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sfnff" event={"ID":"1293736c-513c-490e-afb1-97df72e3e51c","Type":"ContainerStarted","Data":"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.162050 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" event={"ID":"9024dcbb-8ec1-4cfc-853d-24f7974073d0","Type":"ContainerStarted","Data":"31bbfa5b24117c77ea6a5ecc17f1dff8c1e46f63bf0afddd7f2f867114c71b1c"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.162091 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" event={"ID":"9024dcbb-8ec1-4cfc-853d-24f7974073d0","Type":"ContainerStarted","Data":"4d5190a47ccf97fd5c39b5fb52bb79b0db44a90e8d44200ac0181d285f9ae2e6"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.165473 4903 generic.go:334] "Generic (PLEG): container finished" podID="849a13af-5a8d-43b2-b8d8-c07cd8bfe399" containerID="3a23c3bc96f6d7996c8d0a75c596081b6c5f16a9734ddef60a2495a7181e6cc2" exitCode=0
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.166236 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" event={"ID":"849a13af-5a8d-43b2-b8d8-c07cd8bfe399","Type":"ContainerDied","Data":"3a23c3bc96f6d7996c8d0a75c596081b6c5f16a9734ddef60a2495a7181e6cc2"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.209421 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" event={"ID":"d9fe1c64-2433-4dae-b8b6-3bfa57277181","Type":"ContainerStarted","Data":"1a8b4aa37f165f8527fad1fc6f1c9916ef8def64557083fc3c8da3edad80023a"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.209464 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" event={"ID":"d9fe1c64-2433-4dae-b8b6-3bfa57277181","Type":"ContainerStarted","Data":"de8e1803e20e572c100d6881647a347816082e4274b22232e76db94d75ec8ee2"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.222778 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" event={"ID":"bebed1e9-678c-4908-bc22-45325ace05cc","Type":"ContainerStarted","Data":"9b7ec470a309981417c3683eddf469919182e34b1274bf6ee96f0fdb2edb94b3"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.222820 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" event={"ID":"bebed1e9-678c-4908-bc22-45325ace05cc","Type":"ContainerStarted","Data":"40271c94be6f30dde20f9781842e463e04684fd3b23837d7c6a88c71fd176780"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.226750 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" event={"ID":"0346d3c1-896e-4cae-889c-97e4b5acab20","Type":"ContainerStarted","Data":"7e81c8c56d0f3d9b7662305f28183fbc61f5f2e206077a3f01a6b8081f43da4d"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.240896 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" event={"ID":"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa","Type":"ContainerStarted","Data":"30bb7dccf1bc475f131c2e419612138e2568b4467ab3bc9e0cffb5fb1dea0a7d"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.253420 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.254888 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.754875938 +0000 UTC m=+146.445110848 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.278452 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" event={"ID":"a3218153-38d5-4785-876a-7c3c73097f43","Type":"ContainerStarted","Data":"06efaab6584d8623d4da315e1eebdaa15a3e0af0231a210b25d8cc04d3e17358"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.285634 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" event={"ID":"6ca9e3c6-dc50-4e2a-9331-120254254241","Type":"ContainerStarted","Data":"26c88f5ec649226ceac988ae529a1a5b649b909cb4a9bc6180e3032adfb85ed3"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.299632 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" event={"ID":"74c40195-e9f0-43c0-b896-66e7215889d8","Type":"ContainerStarted","Data":"bd22598695d6ce5572d0b0aa0e9997c9bcaa6be6b9f1405e93ad1e361a9dfed8"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.302367 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" event={"ID":"c6c05d44-417f-4f52-a3ed-ee3eb183d452","Type":"ContainerStarted","Data":"4608817e211c325cc3974263c17e133b5d30624dd35a2977a1fe94ba971145a3"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.305343 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.311397 4903 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-fjxzh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.311434 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" podUID="c6c05d44-417f-4f52-a3ed-ee3eb183d452" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.313406 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" event={"ID":"2ca82453-f87b-4664-bc10-0b6f09c50187","Type":"ContainerStarted","Data":"994734a4db711d8abd1e0a25286fd0a40874a7476e07d0dcb3f556a68dff3044"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.330858 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rxr2l" event={"ID":"44cea8ea-c6de-417e-b313-9d8beda2f7c6","Type":"ContainerStarted","Data":"7be234b321571f9485fee0f3f53aa5210690a947dff86994369fb38e000b05bb"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.330896 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rxr2l" event={"ID":"44cea8ea-c6de-417e-b313-9d8beda2f7c6","Type":"ContainerStarted","Data":"584104a9b328c7ea4e4e34bdb9d5e3fdce8d8cdf9cad77b5d11154b77e83a646"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.344963 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" event={"ID":"9c9d0560-2a3a-45c0-a1db-f90926ec348b","Type":"ContainerStarted","Data":"b04a51392e4f92078736a85afec01c7ac05c3585c235b9634455435ee5db2cf2"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.362497 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.364001 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.863975022 +0000 UTC m=+146.554209932 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.374235 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" event={"ID":"2a44a668-4d2a-4e62-ae67-60314da59785","Type":"ContainerStarted","Data":"4c20d31bd87bfff45403f7d7599e62892ace453f295b43a6da0da06e06e31ccb"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.400971 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" event={"ID":"e3590281-9d45-43f2-9b51-67aad451c66c","Type":"ContainerStarted","Data":"521169a45e18cdfda7a9e11e0c3deed32fb1ee8e1bba8ff725475874605a62ef"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.401009 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" event={"ID":"e3590281-9d45-43f2-9b51-67aad451c66c","Type":"ContainerStarted","Data":"35b9d3d67c31c3a6cba83e0c300010617d4e1d365c3a0ffc061bd633a982eb2a"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.401544 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.416818 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" event={"ID":"6141f47a-9c9b-4ef8-85c9-518f947bff57","Type":"ContainerStarted","Data":"3d9940a8328fc086586c8fd22e96569a735b99934c1eace8d3806fc3305b9844"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.419046 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dbzzj" event={"ID":"513976d0-8103-42f8-9091-538d9f27a0f2","Type":"ContainerStarted","Data":"459318becfe769eb6e21f8cb2c95b9fda9df749a2fc79125c5b94c9cac65cde4"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.429927 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rxr2l" podStartSLOduration=5.429908925 podStartE2EDuration="5.429908925s" podCreationTimestamp="2025-11-26 22:23:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.38805165 +0000 UTC m=+146.078286560" watchObservedRunningTime="2025-11-26 22:23:37.429908925 +0000 UTC m=+146.120143835"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.459233 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" event={"ID":"cea77f03-3be2-41bd-be01-cf09fb878b3d","Type":"ContainerStarted","Data":"79d2637e20cb5257469321fb9a3426107aa8c5e17b426d4ccc084b2415f20a38"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.465924 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.467112 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:37.967101142 +0000 UTC m=+146.657336052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.491864 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" podStartSLOduration=124.491847628 podStartE2EDuration="2m4.491847628s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.489180995 +0000 UTC m=+146.179415905" watchObservedRunningTime="2025-11-26 22:23:37.491847628 +0000 UTC m=+146.182082538"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.516089 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" event={"ID":"4115a44a-84ae-4629-970e-16c25d4f59e1","Type":"ContainerStarted","Data":"87b1a6622694143f05601d5ee7444bee72fa1799b818afe155fac3dd465424f2"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.551152 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" event={"ID":"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc","Type":"ContainerStarted","Data":"d459931e5668e9598fb97ee33bb26ebbd9b338107eace7b11540cb40a7bfa953"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.566668 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.568215 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.068197987 +0000 UTC m=+146.758432897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.574964 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" podStartSLOduration=124.574946812 podStartE2EDuration="2m4.574946812s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.574401716 +0000 UTC m=+146.264636706" watchObservedRunningTime="2025-11-26 22:23:37.574946812 +0000 UTC m=+146.265181722"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.575888 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" podStartSLOduration=125.575882546 podStartE2EDuration="2m5.575882546s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.514951761 +0000 UTC m=+146.205186671" watchObservedRunningTime="2025-11-26 22:23:37.575882546 +0000 UTC m=+146.266117456"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.593161 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" event={"ID":"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7","Type":"ContainerStarted","Data":"796050ea741ab9308b3b8cc5c182588620d50aea98d78dd4dc4544b40e484173"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.599563 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" event={"ID":"72c47664-999f-45b2-b047-184bdc7d8c58","Type":"ContainerStarted","Data":"e55cf4b962d9edb0302c6ad4ba07db9ef017d53cac5808862b60747f960f385f"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.624419 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7qxf7" event={"ID":"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a","Type":"ContainerStarted","Data":"a749a15d6277b58c284fefd718865f9694e4ab9e06f5f4e3df11246a51350aeb"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.625191 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7qxf7"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.626646 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.626676 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.638161 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" event={"ID":"bb685681-92a0-4b59-aa97-b02b9b4c73f9","Type":"ContainerStarted","Data":"8491e140fac6da7ec634430fdda220d6b44b7f0d4055940c6f1be8ad5a121d99"}
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.643177 4903 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-d8tw8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.643212 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.643918 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.645157 4903 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7dtgj container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" start-of-body=
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.645184 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" podUID="1e45d40a-645b-4c6f-b001-cb3c8beef2da" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.657529 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-rl8cq"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.657848 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.669025 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zg6dg" podStartSLOduration=125.669011824 podStartE2EDuration="2m5.669011824s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.642569141 +0000 UTC m=+146.332804051" watchObservedRunningTime="2025-11-26 22:23:37.669011824 +0000 UTC m=+146.359246734"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.669542 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" podStartSLOduration=124.669531588 podStartE2EDuration="2m4.669531588s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.66705474 +0000 UTC m=+146.357289650" watchObservedRunningTime="2025-11-26 22:23:37.669531588 +0000 UTC m=+146.359766488"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.670260 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.672259 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.172231192 +0000 UTC m=+146.862466102 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.683598 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.704190 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" podStartSLOduration=124.704173575 podStartE2EDuration="2m4.704173575s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.704040342 +0000 UTC m=+146.394275242" watchObservedRunningTime="2025-11-26 22:23:37.704173575 +0000 UTC m=+146.394408485"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.729378 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 22:23:37 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld
Nov 26 22:23:37 crc kubenswrapper[4903]: [+]process-running ok
Nov 26 22:23:37 crc kubenswrapper[4903]: healthz check failed
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.729744 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.771326 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.773212 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.273196443 +0000 UTC m=+146.963431353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.875063 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7qxf7" podStartSLOduration=125.875045299 podStartE2EDuration="2m5.875045299s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:37.86707707 +0000 UTC m=+146.557311980" watchObservedRunningTime="2025-11-26 22:23:37.875045299 +0000 UTC m=+146.565280209"
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.876341 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.877301 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.37728889 +0000 UTC m=+147.067523800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:37 crc kubenswrapper[4903]: I1126 22:23:37.978101 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:37 crc kubenswrapper[4903]: E1126 22:23:37.978505 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.478488178 +0000 UTC m=+147.168723088 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.002181 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-446xv" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.081561 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.081942 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.581931017 +0000 UTC m=+147.272165927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.182422 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.182640 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.68261375 +0000 UTC m=+147.372848660 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.182917 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.183226 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.683213646 +0000 UTC m=+147.373448556 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.284284 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.284464 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.784437494 +0000 UTC m=+147.474672394 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.284595 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.284913 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.784905858 +0000 UTC m=+147.475140768 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.385563 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.385777 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.885690224 +0000 UTC m=+147.575925134 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.386133 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.386437 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.886429174 +0000 UTC m=+147.576664084 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.487867 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.488240 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.988207687 +0000 UTC m=+147.678442607 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.488304 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.488613 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:38.988601348 +0000 UTC m=+147.678836258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.589573 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.589810 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.089779385 +0000 UTC m=+147.780014305 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.589919 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.590373 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.090356321 +0000 UTC m=+147.780591231 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.642590 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" event={"ID":"a3218153-38d5-4785-876a-7c3c73097f43","Type":"ContainerStarted","Data":"f76ff64c011730d66338ca3934f061414624f894ee79f254a768e146970edc82"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.643952 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" event={"ID":"6ca9e3c6-dc50-4e2a-9331-120254254241","Type":"ContainerStarted","Data":"0f18b31031c609872ec41a87699f814fe059d7eefba03c923eac6f6d557cf083"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.645273 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dbzzj" event={"ID":"513976d0-8103-42f8-9091-538d9f27a0f2","Type":"ContainerStarted","Data":"5ed10283e39ce98506b34c9cda05bf9be8c6bd73796af5deceac37fb6a48e188"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.645314 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dbzzj" event={"ID":"513976d0-8103-42f8-9091-538d9f27a0f2","Type":"ContainerStarted","Data":"23cb81eba54737c976fedeea625e3b646961be2200f13d13ac951617147e161b"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.645386 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.646685 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" 
event={"ID":"cea77f03-3be2-41bd-be01-cf09fb878b3d","Type":"ContainerStarted","Data":"9fe822c1b44ae9a7018050b31cb72e816770b57fae00640509893157bbb5ca3e"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.648041 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" event={"ID":"e4b6f08e-0222-4b7f-9eb7-2c6c37349efa","Type":"ContainerStarted","Data":"f8d189c7dd1106a7fe4bcee4c14c88dcd69c0e8407225bf75e7ecbf43baa8f77"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.649405 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" event={"ID":"f432448b-0071-4b69-ba3c-a05a9fc20199","Type":"ContainerStarted","Data":"07146c23367efb37c0b3d19a647c34aabc290919687854670ea14c9a301051f9"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.650783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" event={"ID":"74c40195-e9f0-43c0-b896-66e7215889d8","Type":"ContainerStarted","Data":"128e889039aeacb936b248caf4d4407ec16b05a466c53ddbc150acb96e4d967c"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.652012 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" event={"ID":"0346d3c1-896e-4cae-889c-97e4b5acab20","Type":"ContainerStarted","Data":"216dccaa0858321087f20c24ea08f96b250c295ff273eba4b90e63a679429e98"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.653093 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" event={"ID":"2ca82453-f87b-4664-bc10-0b6f09c50187","Type":"ContainerStarted","Data":"ef638e38f52cf491218eccb5dc2c4319ca6ee954bb4c9362ed9c46341e9caa2c"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.654186 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" event={"ID":"2e6e49c1-f210-4ee8-af41-9e43123ae910","Type":"ContainerStarted","Data":"e2850e2da8798f8cf1457ac6b6f84fd5bf2b605bf4671f51ace50c3bd71f6c0f"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.654385 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.655929 4903 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-94zhz container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.655982 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.656135 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-bs46d" event={"ID":"a7aa8fd5-6118-4467-9f65-f98cc2f45979","Type":"ContainerStarted","Data":"d4993a749fe55e70093306a4d9fb3c2b0761472ed81acbdb45b87289f25eb8c0"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.657819 
4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtskz" event={"ID":"e56e61b8-bfdf-4800-9bab-90c0d4bd87bc","Type":"ContainerStarted","Data":"6a371d4c16032409a5e36798be70d10a4922de5d43b4cee1822538bd61e61ab1"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.659209 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" event={"ID":"2ac5c6d2-c134-4f7f-bfc0-16358351b0c7","Type":"ContainerStarted","Data":"57b66563739bb4f6c41012effb7d3a06fff0fd6e392faf759b50f78dd0850826"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.660954 4903 generic.go:334] "Generic (PLEG): container finished" podID="2a44a668-4d2a-4e62-ae67-60314da59785" containerID="4c20d31bd87bfff45403f7d7599e62892ace453f295b43a6da0da06e06e31ccb" exitCode=0 Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.660999 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" event={"ID":"2a44a668-4d2a-4e62-ae67-60314da59785","Type":"ContainerDied","Data":"4c20d31bd87bfff45403f7d7599e62892ace453f295b43a6da0da06e06e31ccb"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.661035 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" event={"ID":"2a44a668-4d2a-4e62-ae67-60314da59785","Type":"ContainerStarted","Data":"f57f24485562f40f8e0c24bcefa2ad12d91b3081fd54f6928c27cba0094a3b0c"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.662223 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" event={"ID":"9c9d0560-2a3a-45c0-a1db-f90926ec348b","Type":"ContainerStarted","Data":"01d9fcc77c8dc91d34f4e56d6587ce5d05e84ae216f739acaccadd1f72600d1c"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.664043 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pv6pw" podStartSLOduration=125.664032015 podStartE2EDuration="2m5.664032015s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.663643995 +0000 UTC m=+147.353878905" watchObservedRunningTime="2025-11-26 22:23:38.664032015 +0000 UTC m=+147.354266925" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.664060 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" event={"ID":"849a13af-5a8d-43b2-b8d8-c07cd8bfe399","Type":"ContainerStarted","Data":"56d32abecc955d00e87efbf74ec0df7190e95e4538b534b790b6efb258fd99c6"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.664099 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" event={"ID":"849a13af-5a8d-43b2-b8d8-c07cd8bfe399","Type":"ContainerStarted","Data":"c1e737275283ab24ccc12935826878e1d395c3994f8527b661b789081e4110e9"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.667623 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" event={"ID":"6698acb2-1ed3-44af-ac70-8c11bdca5c6e","Type":"ContainerStarted","Data":"c7027c9a1f7485fe84ff530d7e1b638b0cfab8d203b897a2cfa94b7049239ac9"} Nov 26 22:23:38 crc 
kubenswrapper[4903]: I1126 22:23:38.667666 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.668997 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" event={"ID":"03bb801d-92a8-4a3a-a99b-9de804ba04ab","Type":"ContainerStarted","Data":"2a7ebd76322a7ccd5bfd10578be39f059d55944a26365178ed0a6d6b1e5f3b58"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.670486 4903 generic.go:334] "Generic (PLEG): container finished" podID="6141f47a-9c9b-4ef8-85c9-518f947bff57" containerID="3d9940a8328fc086586c8fd22e96569a735b99934c1eace8d3806fc3305b9844" exitCode=0 Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.670532 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" event={"ID":"6141f47a-9c9b-4ef8-85c9-518f947bff57","Type":"ContainerDied","Data":"3d9940a8328fc086586c8fd22e96569a735b99934c1eace8d3806fc3305b9844"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.673533 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" event={"ID":"9024dcbb-8ec1-4cfc-853d-24f7974073d0","Type":"ContainerStarted","Data":"34de958fbbe0cf43864f6b5359d62021f3094ca45eb240940259a01e9805b32d"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.674922 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-zb6l2" event={"ID":"72c47664-999f-45b2-b047-184bdc7d8c58","Type":"ContainerStarted","Data":"9575d1f84f1ab979f7855f27deab693ae50b30d4632f75c6300c0d6f953bb331"} Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.675970 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.675993 4903 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-fjxzh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.676014 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.676025 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" podUID="c6c05d44-417f-4f52-a3ed-ee3eb183d452" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.691147 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") 
pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.691823 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" podStartSLOduration=125.691794325 podStartE2EDuration="2m5.691794325s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.690366456 +0000 UTC m=+147.380601366" watchObservedRunningTime="2025-11-26 22:23:38.691794325 +0000 UTC m=+147.382029235" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.692495 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.192470914 +0000 UTC m=+147.882705824 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.736386 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:38 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:38 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:38 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.736433 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.756767 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mclwt" podStartSLOduration=125.756753212 podStartE2EDuration="2m5.756753212s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.724424448 +0000 UTC m=+147.414659358" watchObservedRunningTime="2025-11-26 22:23:38.756753212 +0000 UTC m=+147.446988122" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.758564 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-z9bs6" podStartSLOduration=125.758555651 podStartE2EDuration="2m5.758555651s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.755569279 +0000 UTC m=+147.445804189" watchObservedRunningTime="2025-11-26 
22:23:38.758555651 +0000 UTC m=+147.448790561" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.779150 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-5qhmj" podStartSLOduration=125.779135814 podStartE2EDuration="2m5.779135814s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.776354118 +0000 UTC m=+147.466589038" watchObservedRunningTime="2025-11-26 22:23:38.779135814 +0000 UTC m=+147.469370724" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.794095 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.794462 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.294449863 +0000 UTC m=+147.984684773 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.817265 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-gqd92" podStartSLOduration=125.817250076 podStartE2EDuration="2m5.817250076s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.816035323 +0000 UTC m=+147.506270233" watchObservedRunningTime="2025-11-26 22:23:38.817250076 +0000 UTC m=+147.507484986" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.818368 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-6vv92" podStartSLOduration=125.818363137 podStartE2EDuration="2m5.818363137s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.795859411 +0000 UTC m=+147.486094321" watchObservedRunningTime="2025-11-26 22:23:38.818363137 +0000 UTC m=+147.508598047" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.837153 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" podStartSLOduration=125.83713762 podStartE2EDuration="2m5.83713762s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-26 22:23:38.836154453 +0000 UTC m=+147.526389363" watchObservedRunningTime="2025-11-26 22:23:38.83713762 +0000 UTC m=+147.527372530" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.860092 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.861756 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.871432 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.872248 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-dbzzj" podStartSLOduration=6.87222674 podStartE2EDuration="6.87222674s" podCreationTimestamp="2025-11-26 22:23:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.871262403 +0000 UTC m=+147.561497333" watchObservedRunningTime="2025-11-26 22:23:38.87222674 +0000 UTC m=+147.562461650" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.900433 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.900641 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.900748 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4dd7\" (UniqueName: \"kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.900783 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:38 crc kubenswrapper[4903]: E1126 22:23:38.900863 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.400850203 +0000 UTC m=+148.091085113 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.924546 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:23:38 crc kubenswrapper[4903]: I1126 22:23:38.960094 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-5572p" podStartSLOduration=125.960074852 podStartE2EDuration="2m5.960074852s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.958871139 +0000 UTC m=+147.649106049" watchObservedRunningTime="2025-11-26 22:23:38.960074852 +0000 UTC m=+147.650309762" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.003322 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.003389 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4dd7\" (UniqueName: \"kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.003424 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.003451 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.003863 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.004133 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 22:23:39.504117066 +0000 UTC m=+148.194351976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.004261 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.028391 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4dd7\" (UniqueName: \"kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7\") pod \"community-operators-mmswh\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.048764 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4fzk8" podStartSLOduration=126.048749297 podStartE2EDuration="2m6.048749297s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.045086247 +0000 UTC m=+147.735321157" watchObservedRunningTime="2025-11-26 22:23:39.048749297 +0000 UTC m=+147.738984197" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.050906 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-x4kfj" podStartSLOduration=127.050898426 podStartE2EDuration="2m7.050898426s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:38.996958691 +0000 UTC m=+147.687193601" watchObservedRunningTime="2025-11-26 22:23:39.050898426 +0000 UTC m=+147.741133336" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.058133 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.059123 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.061431 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.090932 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-bs46d" podStartSLOduration=7.09091937 podStartE2EDuration="7.09091937s" podCreationTimestamp="2025-11-26 22:23:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.088939156 +0000 UTC m=+147.779174066" watchObservedRunningTime="2025-11-26 22:23:39.09091937 +0000 UTC m=+147.781154280" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.094926 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.109601 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.109822 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.609794377 +0000 UTC m=+148.300029287 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.110379 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8lrz\" (UniqueName: \"kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.110492 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.110638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.110733 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.111100 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.611086632 +0000 UTC m=+148.301321542 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.149874 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-fgg79" podStartSLOduration=126.149860002 podStartE2EDuration="2m6.149860002s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.148097494 +0000 UTC m=+147.838332404" watchObservedRunningTime="2025-11-26 22:23:39.149860002 +0000 UTC m=+147.840094912" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.150575 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-btk26" podStartSLOduration=126.150570932 podStartE2EDuration="2m6.150570932s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.11940766 +0000 UTC m=+147.809642570" watchObservedRunningTime="2025-11-26 22:23:39.150570932 +0000 UTC m=+147.840805842" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.156153 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7dtgj" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.174100 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-sfnff" podStartSLOduration=127.174082155 podStartE2EDuration="2m7.174082155s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.172466561 +0000 UTC m=+147.862701471" watchObservedRunningTime="2025-11-26 22:23:39.174082155 +0000 UTC m=+147.864317055" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.187458 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212032 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212185 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212248 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8lrz\" (UniqueName: \"kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212273 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212575 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.212649 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.712634339 +0000 UTC m=+148.402869249 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.212881 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.253475 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8lrz\" (UniqueName: \"kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz\") pod \"certified-operators-z4f6g\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.258389 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-bzf7d" podStartSLOduration=127.25837275 podStartE2EDuration="2m7.25837275s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.246821624 +0000 UTC m=+147.937056534" watchObservedRunningTime="2025-11-26 22:23:39.25837275 +0000 UTC m=+147.948607660" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.258644 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.260411 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.294010 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.294377 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" podStartSLOduration=127.294360465 podStartE2EDuration="2m7.294360465s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.279654362 +0000 UTC m=+147.969889272" watchObservedRunningTime="2025-11-26 22:23:39.294360465 +0000 UTC m=+147.984595375" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.317399 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.317772 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbwps\" (UniqueName: \"kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.317820 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-catalog-content\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.317866 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.318230 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.818218977 +0000 UTC m=+148.508453887 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.340999 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-4q25l" podStartSLOduration=126.340983919 podStartE2EDuration="2m6.340983919s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.304545653 +0000 UTC m=+147.994780563" watchObservedRunningTime="2025-11-26 22:23:39.340983919 +0000 UTC m=+148.031218819" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.341545 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" podStartSLOduration=127.341539464 podStartE2EDuration="2m7.341539464s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.341106463 +0000 UTC m=+148.031341373" watchObservedRunningTime="2025-11-26 22:23:39.341539464 +0000 UTC m=+148.031774374" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.373938 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.402647 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-g8r8c" podStartSLOduration=126.402633165 podStartE2EDuration="2m6.402633165s" podCreationTimestamp="2025-11-26 22:21:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:39.401297289 +0000 UTC m=+148.091532199" watchObservedRunningTime="2025-11-26 22:23:39.402633165 +0000 UTC m=+148.092868075" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.429709 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.429907 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-catalog-content\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.430018 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " 
pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.430134 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:39.930109437 +0000 UTC m=+148.620344347 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.430530 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-catalog-content\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.430650 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbwps\" (UniqueName: \"kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.431196 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.458793 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbwps\" (UniqueName: \"kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps\") pod \"community-operators-ddms7\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") " pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.466319 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.467215 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.505825 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.532006 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.532266 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z2pk\" (UniqueName: \"kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.532407 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.532519 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.533067 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.033048842 +0000 UTC m=+148.723283752 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.609715 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.638860 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.639188 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z2pk\" (UniqueName: \"kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.639218 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.639236 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.639914 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.639986 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.139972386 +0000 UTC m=+148.830207296 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.640394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.698613 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z2pk\" (UniqueName: \"kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk\") pod \"certified-operators-6z5bc\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.730684 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:39 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:39 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:39 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.730996 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.740261 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.740606 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.240594128 +0000 UTC m=+148.930829038 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.755539 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" event={"ID":"bb685681-92a0-4b59-aa97-b02b9b4c73f9","Type":"ContainerStarted","Data":"ca70d34058f69fb2e8465ddb4c6047a8a9f4ccfa6e6af822803b0c46f15e386a"} Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.757205 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.757254 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.757307 4903 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-94zhz container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.757359 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.775103 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-fjxzh" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.775435 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.775677 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.798831 4903 patch_prober.go:28] interesting pod/apiserver-76f77b778f-tjpcm container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.798895 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" podUID="849a13af-5a8d-43b2-b8d8-c07cd8bfe399" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 26 22:23:39 crc 
kubenswrapper[4903]: I1126 22:23:39.818134 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.825019 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.841667 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.843322 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.343304277 +0000 UTC m=+149.033539187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.937326 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:23:39 crc kubenswrapper[4903]: I1126 22:23:39.946541 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:39 crc kubenswrapper[4903]: E1126 22:23:39.946867 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.446856589 +0000 UTC m=+149.137091499 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.048909 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.049528 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.549509796 +0000 UTC m=+149.239744706 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.151638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.152591 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.652580625 +0000 UTC m=+149.342815525 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.253154 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.253481 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.753464875 +0000 UTC m=+149.443699785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.253615 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.253970 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.753963568 +0000 UTC m=+149.444198478 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.282844 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.282912 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.355010 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.355341 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.85532739 +0000 UTC m=+149.545562290 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.456737 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.457112 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:40.957101043 +0000 UTC m=+149.647335953 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.558472 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.558787 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.058775984 +0000 UTC m=+149.749010894 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.659838 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.660125 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.160114295 +0000 UTC m=+149.850349205 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.759049 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerStarted","Data":"0d5abc3dea573597f875ab724f2ea29f041583f19f7903b6a43599ddb873c44e"} Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.760242 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.760835 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerStarted","Data":"2e8c15b417bc3cc44a5f5314328ac526d67a90fef70a6026cda9ce6b54fe857b"} Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.760954 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.761438 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.261414445 +0000 UTC m=+149.951649355 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.771433 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.805209 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:40 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:40 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:40 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.805259 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.874034 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.874293 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.874337 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.874426 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.888204 4903 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.890857 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.390841845 +0000 UTC m=+150.081076755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.907557 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.920072 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.920587 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.960280 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.976954 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:40 crc kubenswrapper[4903]: E1126 22:23:40.977489 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.477473385 +0000 UTC m=+150.167708295 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:40 crc kubenswrapper[4903]: I1126 22:23:40.991060 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-btw2l" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.052055 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.066957 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.067421 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"] Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.072603 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.080901 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.080942 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.081508 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.581497089 +0000 UTC m=+150.271731999 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.113181 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"] Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.184411 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.184849 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.184899 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.184933 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pflfd\" (UniqueName: \"kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.185055 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.685039801 +0000 UTC m=+150.375274711 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.275424 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"] Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.289368 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.289412 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.289441 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.289473 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pflfd\" (UniqueName: \"kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.289945 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.78993462 +0000 UTC m=+150.480169520 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.290147 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.290567 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.304520 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.329171 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.344084 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pflfd\" (UniqueName: \"kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd\") pod \"redhat-marketplace-fttb4\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.393448 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftrmx\" (UniqueName: \"kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx\") pod \"6141f47a-9c9b-4ef8-85c9-518f947bff57\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.393683 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.393732 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume\") pod \"6141f47a-9c9b-4ef8-85c9-518f947bff57\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.393795 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume\") pod \"6141f47a-9c9b-4ef8-85c9-518f947bff57\" (UID: \"6141f47a-9c9b-4ef8-85c9-518f947bff57\") " Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.394563 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume" (OuterVolumeSpecName: "config-volume") pod "6141f47a-9c9b-4ef8-85c9-518f947bff57" (UID: "6141f47a-9c9b-4ef8-85c9-518f947bff57"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.399134 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx" (OuterVolumeSpecName: "kube-api-access-ftrmx") pod "6141f47a-9c9b-4ef8-85c9-518f947bff57" (UID: "6141f47a-9c9b-4ef8-85c9-518f947bff57"). InnerVolumeSpecName "kube-api-access-ftrmx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.399446 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:41.899423784 +0000 UTC m=+150.589658694 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.402044 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6141f47a-9c9b-4ef8-85c9-518f947bff57" (UID: "6141f47a-9c9b-4ef8-85c9-518f947bff57"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.456522 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"]
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.459391 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6141f47a-9c9b-4ef8-85c9-518f947bff57" containerName="collect-profiles"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.459407 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6141f47a-9c9b-4ef8-85c9-518f947bff57" containerName="collect-profiles"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.459522 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6141f47a-9c9b-4ef8-85c9-518f947bff57" containerName="collect-profiles"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.460164 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.485715 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"]
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.503406 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.503469 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6141f47a-9c9b-4ef8-85c9-518f947bff57-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.503483 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftrmx\" (UniqueName: \"kubernetes.io/projected/6141f47a-9c9b-4ef8-85c9-518f947bff57-kube-api-access-ftrmx\") on node \"crc\" DevicePath \"\""
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.503493 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6141f47a-9c9b-4ef8-85c9-518f947bff57-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.503762 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.003749988 +0000 UTC m=+150.693984898 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.556960 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fttb4"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.604152 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.604332 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.604399 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqckx\" (UniqueName: \"kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.604420 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.604526 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.104510903 +0000 UTC m=+150.794745813 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.706212 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.706541 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.706611 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqckx\" (UniqueName: \"kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.706633 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.707471 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.707766 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.708043 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.208025943 +0000 UTC m=+150.898260853 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.736851 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 22:23:41 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld
Nov 26 22:23:41 crc kubenswrapper[4903]: [+]process-running ok
Nov 26 22:23:41 crc kubenswrapper[4903]: healthz check failed
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.736887 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.751432 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqckx\" (UniqueName: \"kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx\") pod \"redhat-marketplace-kg68t\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.793921 4903 generic.go:334] "Generic (PLEG): container finished" podID="ee178cda-7bde-4997-8554-e20c3548288b" containerID="7043053169eb4f3eb83c7ec4360aea8ca493053e79b6e0d5dabf9cb0ee6ee07b" exitCode=0
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.794225 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerDied","Data":"7043053169eb4f3eb83c7ec4360aea8ca493053e79b6e0d5dabf9cb0ee6ee07b"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.807201 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.807525 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.307499645 +0000 UTC m=+150.997734555 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.809735 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.810992 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.811935 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb" event={"ID":"6141f47a-9c9b-4ef8-85c9-518f947bff57","Type":"ContainerDied","Data":"9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.811976 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d2843853485f00c2035084865c7c9d48bb7afeae17b709197179491793b92c6"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.819988 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerStarted","Data":"c0bb96ed33c5d38c871a39b59f2d23e9740d5f774db9fbdbaa77522cce34b467"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.820020 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerStarted","Data":"ff2fe7588507129e55c8c6241632e80a5dec4907e58a49c43a7985eeacacfbe2"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.841885 4903 generic.go:334] "Generic (PLEG): container finished" podID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerID="dca04f7477e69aedc751edb25b99b381187a4049519212291cc07112fb6de157" exitCode=0
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.842154 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerDied","Data":"dca04f7477e69aedc751edb25b99b381187a4049519212291cc07112fb6de157"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.857035 4903 generic.go:334] "Generic (PLEG): container finished" podID="e5cf807b-aa56-4158-90c9-519d5e076459" containerID="b4915d22af8c264b10fcb353f18e137d766a2057cbd0aafaacc43c7a64dcfcf3" exitCode=0
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.858223 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerDied","Data":"b4915d22af8c264b10fcb353f18e137d766a2057cbd0aafaacc43c7a64dcfcf3"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.858251 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerStarted","Data":"892a98652785fe10eb0ffb7bea99f78dce665653cef0dcc141bd0b71a755c52f"}
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.882830 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-lrvrl"
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.911993 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:41 crc kubenswrapper[4903]: E1126 22:23:41.916256 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.416240908 +0000 UTC m=+151.106475808 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:41 crc kubenswrapper[4903]: I1126 22:23:41.944786 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kg68t"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.014931 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.015352 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.515337608 +0000 UTC m=+151.205572518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.077923 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.078855 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.079344 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.080237 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.119252 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jksld\" (UniqueName: \"kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.119325 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.119378 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.119418 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.119736 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.619725464 +0000 UTC m=+151.309960374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.223253 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.223609 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jksld\" (UniqueName: \"kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.223646 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.223673 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.224150 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.224208 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.72419461 +0000 UTC m=+151.414429520 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.224592 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.258580 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jksld\" (UniqueName: \"kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld\") pod \"redhat-operators-rjqdj\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.284167 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rjqdj"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.324835 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.325411 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.825400847 +0000 UTC m=+151.515635757 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.325932 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.426677 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.427127 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:42.927111809 +0000 UTC m=+151.617346719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.457524 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.458745 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.491316 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.528202 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86qnz\" (UniqueName: \"kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.528457 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-catalog-content\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.528479 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.528545 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.528818 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.028804591 +0000 UTC m=+151.719039501 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.631141 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.631368 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.631466 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86qnz\" (UniqueName: \"kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.631485 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-catalog-content\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.631852 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-catalog-content\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.631913 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.13189865 +0000 UTC m=+151.822133560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.632370 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.657962 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86qnz\" (UniqueName: \"kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz\") pod \"redhat-operators-rz6wd\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") " pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.672972 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.673829 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.685193 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.686017 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.713638 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.729722 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 22:23:42 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld
Nov 26 22:23:42 crc kubenswrapper[4903]: [+]process-running ok
Nov 26 22:23:42 crc kubenswrapper[4903]: healthz check failed
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.729764 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.732302 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.732379 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.732425 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.733155 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.233143199 +0000 UTC m=+151.923378109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.766366 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"]
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.776558 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.801020 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"]
Nov 26 22:23:42 crc kubenswrapper[4903]: W1126 22:23:42.826201 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda054a1e6_36d5_4c7c_a520_ee213f7f36fa.slice/crio-6dcda3aca602a3c55780b3e002247af081476858ab88e9da403e57fc97c878df WatchSource:0}: Error finding container 6dcda3aca602a3c55780b3e002247af081476858ab88e9da403e57fc97c878df: Status 404 returned error can't find the container with id 6dcda3aca602a3c55780b3e002247af081476858ab88e9da403e57fc97c878df
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.833945 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.834145 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.334112091 +0000 UTC m=+152.024347001 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.834242 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.834326 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.834378 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.834511 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.834756 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.334744347 +0000 UTC m=+152.024979257 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.865533 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.879711 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerStarted","Data":"f2b7894dc34ddf310ecba74466508af39455e5d9567e330366831db49427fa54"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.883720 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b43493dc589eb14ecf0184bb83d92255fceaa6702afe34ed6a669a3db77642c0"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.883753 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8857c9a305c2a4e8afb570b996d9e4698308013daad0ddd5ee16dbe8bab931ba"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.889328 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9aa20e41b184a3df5580f7c93e180537fa5677885c7245b749ac4a717174ebcb"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.889368 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"7a5e4dfe717456aa3e83ce07fdc5a7a6f0cd7350d40ee5ff4bf6fb70dcafed36"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.890678 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"dfee966e599fe0a5d34d58aede67043df61498f2fbf3d4c7f2e72fee9739a5b2"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.890766 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"c7a2f9868c43bac78247c6e1276ff5e40b18836b682d236f7ab3bbfffdfd7ad5"}
Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.890933 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
pod="hostpath-provisioner/csi-hostpathplugin-7gs98" event={"ID":"bb685681-92a0-4b59-aa97-b02b9b4c73f9","Type":"ContainerStarted","Data":"e5e56a75aea105682d573795b0c64900639a28b62f98475993c73b38fe0549de"} Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.936213 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:42 crc kubenswrapper[4903]: E1126 22:23:42.938760 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.438741371 +0000 UTC m=+152.128976281 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.942529 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerStarted","Data":"6dcda3aca602a3c55780b3e002247af081476858ab88e9da403e57fc97c878df"} Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.958157 4903 generic.go:334] "Generic (PLEG): container finished" podID="747780f1-ef94-4292-b306-4cce345c6a34" containerID="c0bb96ed33c5d38c871a39b59f2d23e9740d5f774db9fbdbaa77522cce34b467" exitCode=0 Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.958383 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerDied","Data":"c0bb96ed33c5d38c871a39b59f2d23e9740d5f774db9fbdbaa77522cce34b467"} Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.962082 4903 generic.go:334] "Generic (PLEG): container finished" podID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerID="f0d9f7c8687b30f16665e98ba8cefaff2131441b8bfff5aa979781061912514d" exitCode=0 Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.963384 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerDied","Data":"f0d9f7c8687b30f16665e98ba8cefaff2131441b8bfff5aa979781061912514d"} Nov 26 22:23:42 crc kubenswrapper[4903]: I1126 22:23:42.963409 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerStarted","Data":"a722fac1484ae4dc823ec8b1d0f3d02a20429fdcd8ffbefbb7f310fc3b721e4d"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.040271 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.040683 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.041274 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.541262586 +0000 UTC m=+152.231497496 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.074597 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"] Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.141896 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.142662 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.642633208 +0000 UTC m=+152.332868118 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.145744 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.146114 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.213251 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebb30834_3c49_4766_993b_d52693207694.slice/crio-fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebb30834_3c49_4766_993b_d52693207694.slice/crio-conmon-fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda054a1e6_36d5_4c7c_a520_ee213f7f36fa.slice/crio-229320a83cff469bd5f0b9666f9e9a3f3124ce6408764ac43683a140c2b95694.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.245178 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.246737 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.247110 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.747094665 +0000 UTC m=+152.437329565 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.301117 4903 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.348499 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.348821 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.848803876 +0000 UTC m=+152.539038786 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.449584 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.449787 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.949762867 +0000 UTC m=+152.639997777 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.449908 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh"
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.450213 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:43.95020332 +0000 UTC m=+152.640438230 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.552081 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.052045204 +0000 UTC m=+152.742280114 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.552375 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.553387 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.553986 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.053978468 +0000 UTC m=+152.744213378 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.655043 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.655484 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.155462293 +0000 UTC m=+152.845697203 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.728374 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:43 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:43 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:43 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.728466 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.757926 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.758593 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.258574653 +0000 UTC m=+152.948809563 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.859106 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.859544 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.359526094 +0000 UTC m=+153.049761014 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.961030 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:43 crc kubenswrapper[4903]: E1126 22:23:43.961653 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.461625716 +0000 UTC m=+153.151860636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.975611 4903 generic.go:334] "Generic (PLEG): container finished" podID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerID="3d3d1af2a8371d259291bdf597d033c61e0235db82cc25a456d4fe836e7ca467" exitCode=0 Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.975785 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerDied","Data":"3d3d1af2a8371d259291bdf597d033c61e0235db82cc25a456d4fe836e7ca467"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.975845 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerStarted","Data":"e81a22ae817001f6b7821e3d3792d0d43cbfa1112f6c969800683d5fbcb44ede"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.989079 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" event={"ID":"bb685681-92a0-4b59-aa97-b02b9b4c73f9","Type":"ContainerStarted","Data":"b229aaae3a10c1a77c361d5cd54d3fd539683b9f5f9b95b3bf924ca31b9252f1"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.989137 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" event={"ID":"bb685681-92a0-4b59-aa97-b02b9b4c73f9","Type":"ContainerStarted","Data":"2080a0fbef9f16c6e6b376bc84f81d15d70cdd119a88edcedd5976d8bb3352b8"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.992056 4903 generic.go:334] "Generic (PLEG): container finished" podID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerID="229320a83cff469bd5f0b9666f9e9a3f3124ce6408764ac43683a140c2b95694" exitCode=0 Nov 26 22:23:43 crc kubenswrapper[4903]: 
I1126 22:23:43.992222 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerDied","Data":"229320a83cff469bd5f0b9666f9e9a3f3124ce6408764ac43683a140c2b95694"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.996425 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5a6cd498-d18b-4c56-a1ab-1b8e202690be","Type":"ContainerStarted","Data":"f75e08d9c6191a56f1dd448b6e0ba44482c78a2e8985a234b215943c8432b1c7"} Nov 26 22:23:43 crc kubenswrapper[4903]: I1126 22:23:43.996492 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5a6cd498-d18b-4c56-a1ab-1b8e202690be","Type":"ContainerStarted","Data":"56fde3c891d3e0ec225d6370e4befcd9a5dab52c245d945809843f9e108c9c8a"} Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.000629 4903 generic.go:334] "Generic (PLEG): container finished" podID="ebb30834-3c49-4766-993b-d52693207694" containerID="fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca" exitCode=0 Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.001638 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerDied","Data":"fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca"} Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.060475 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.060457909 podStartE2EDuration="2.060457909s" podCreationTimestamp="2025-11-26 22:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:44.055150804 +0000 UTC m=+152.745385734" watchObservedRunningTime="2025-11-26 22:23:44.060457909 +0000 UTC m=+152.750692839" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.062116 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:44 crc kubenswrapper[4903]: E1126 22:23:44.062232 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.562204227 +0000 UTC m=+153.252439147 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.062353 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: E1126 22:23:44.063780 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.563768599 +0000 UTC m=+153.254003519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.088449 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-7gs98" podStartSLOduration=12.088412204 podStartE2EDuration="12.088412204s" podCreationTimestamp="2025-11-26 22:23:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:44.084970109 +0000 UTC m=+152.775205039" watchObservedRunningTime="2025-11-26 22:23:44.088412204 +0000 UTC m=+152.778647124" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.163816 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:44 crc kubenswrapper[4903]: E1126 22:23:44.164105 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.664058313 +0000 UTC m=+153.354293273 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.164189 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: E1126 22:23:44.164537 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 22:23:44.664523055 +0000 UTC m=+153.354757965 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-qb8qh" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.218886 4903 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T22:23:43.301148513Z","Handler":null,"Name":""} Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.225062 4903 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.225127 4903 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.265428 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.273068 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.367052 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.487803 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.492444 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.587231 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-qb8qh\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.671490 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.725218 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.731748 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:44 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:44 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:44 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.731804 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.753062 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.791444 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:44 crc kubenswrapper[4903]: I1126 22:23:44.796283 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-tjpcm" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.017528 4903 generic.go:334] "Generic (PLEG): container finished" podID="5a6cd498-d18b-4c56-a1ab-1b8e202690be" containerID="f75e08d9c6191a56f1dd448b6e0ba44482c78a2e8985a234b215943c8432b1c7" exitCode=0 Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.017720 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5a6cd498-d18b-4c56-a1ab-1b8e202690be","Type":"ContainerDied","Data":"f75e08d9c6191a56f1dd448b6e0ba44482c78a2e8985a234b215943c8432b1c7"} Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.206811 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.206923 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.209105 4903 patch_prober.go:28] interesting pod/console-f9d7485db-sfnff container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.209160 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-sfnff" podUID="1293736c-513c-490e-afb1-97df72e3e51c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.210493 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Liveness probe status=failure 
output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.210525 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.210537 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.210558 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.277911 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.300076 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.300678 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.303124 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.303133 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.317593 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 22:23:45 crc kubenswrapper[4903]: W1126 22:23:45.342846 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod946ffb39_1ab9_4606_aeba_77e75d32fa17.slice/crio-aed33e18f6006586cc084039051a2f8afd00447d40a1179ab90e7d8cc4b3c0ca WatchSource:0}: Error finding container aed33e18f6006586cc084039051a2f8afd00447d40a1179ab90e7d8cc4b3c0ca: Status 404 returned error can't find the container with id aed33e18f6006586cc084039051a2f8afd00447d40a1179ab90e7d8cc4b3c0ca Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.389188 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.389283 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: 
\"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.490408 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.490515 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.490604 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.494479 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.513035 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.621957 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.730829 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 22:23:45 crc kubenswrapper[4903]: [-]has-synced failed: reason withheld Nov 26 22:23:45 crc kubenswrapper[4903]: [+]process-running ok Nov 26 22:23:45 crc kubenswrapper[4903]: healthz check failed Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.731104 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 22:23:45 crc kubenswrapper[4903]: I1126 22:23:45.903803 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 22:23:45 crc kubenswrapper[4903]: W1126 22:23:45.913556 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poded4c41af_65e0_4b74_834f_346c66d32263.slice/crio-c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795 WatchSource:0}: Error finding container c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795: Status 404 returned error can't find the container with id c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795 Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.042053 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.044215 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" event={"ID":"946ffb39-1ab9-4606-aeba-77e75d32fa17","Type":"ContainerStarted","Data":"236d33ede4cdc7114d7bf73ae508b5fbeca9f01b32d5920a69a60321053468e3"} Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.044301 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" event={"ID":"946ffb39-1ab9-4606-aeba-77e75d32fa17","Type":"ContainerStarted","Data":"aed33e18f6006586cc084039051a2f8afd00447d40a1179ab90e7d8cc4b3c0ca"} Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.044380 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.046956 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ed4c41af-65e0-4b74-834f-346c66d32263","Type":"ContainerStarted","Data":"c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795"} Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.070772 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" podStartSLOduration=134.070742766 podStartE2EDuration="2m14.070742766s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:46.061424192 +0000 UTC m=+154.751659112" 
watchObservedRunningTime="2025-11-26 22:23:46.070742766 +0000 UTC m=+154.760977666" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.310036 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.408472 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir\") pod \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.408658 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access\") pod \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\" (UID: \"5a6cd498-d18b-4c56-a1ab-1b8e202690be\") " Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.408777 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "5a6cd498-d18b-4c56-a1ab-1b8e202690be" (UID: "5a6cd498-d18b-4c56-a1ab-1b8e202690be"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.409129 4903 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.431151 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "5a6cd498-d18b-4c56-a1ab-1b8e202690be" (UID: "5a6cd498-d18b-4c56-a1ab-1b8e202690be"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.510818 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5a6cd498-d18b-4c56-a1ab-1b8e202690be-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.728308 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:46 crc kubenswrapper[4903]: I1126 22:23:46.730854 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-lxr2h" Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.058325 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5a6cd498-d18b-4c56-a1ab-1b8e202690be","Type":"ContainerDied","Data":"56fde3c891d3e0ec225d6370e4befcd9a5dab52c245d945809843f9e108c9c8a"} Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.058361 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56fde3c891d3e0ec225d6370e4befcd9a5dab52c245d945809843f9e108c9c8a" Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.058361 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.060309 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ed4c41af-65e0-4b74-834f-346c66d32263","Type":"ContainerStarted","Data":"c9c3a1001787806ee7ed4d7ce281ff2e5150a7b269b2c12d39cdfcb026e24675"} Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.094026 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.093993481 podStartE2EDuration="2.093993481s" podCreationTimestamp="2025-11-26 22:23:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:23:47.09357614 +0000 UTC m=+155.783811050" watchObservedRunningTime="2025-11-26 22:23:47.093993481 +0000 UTC m=+155.784228391" Nov 26 22:23:47 crc kubenswrapper[4903]: I1126 22:23:47.506262 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-dbzzj" Nov 26 22:23:48 crc kubenswrapper[4903]: I1126 22:23:48.087231 4903 generic.go:334] "Generic (PLEG): container finished" podID="ed4c41af-65e0-4b74-834f-346c66d32263" containerID="c9c3a1001787806ee7ed4d7ce281ff2e5150a7b269b2c12d39cdfcb026e24675" exitCode=0 Nov 26 22:23:48 crc kubenswrapper[4903]: I1126 22:23:48.087573 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ed4c41af-65e0-4b74-834f-346c66d32263","Type":"ContainerDied","Data":"c9c3a1001787806ee7ed4d7ce281ff2e5150a7b269b2c12d39cdfcb026e24675"} Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.438429 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.482573 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access\") pod \"ed4c41af-65e0-4b74-834f-346c66d32263\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.482618 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir\") pod \"ed4c41af-65e0-4b74-834f-346c66d32263\" (UID: \"ed4c41af-65e0-4b74-834f-346c66d32263\") " Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.482745 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ed4c41af-65e0-4b74-834f-346c66d32263" (UID: "ed4c41af-65e0-4b74-834f-346c66d32263"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.483821 4903 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ed4c41af-65e0-4b74-834f-346c66d32263-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.488610 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ed4c41af-65e0-4b74-834f-346c66d32263" (UID: "ed4c41af-65e0-4b74-834f-346c66d32263"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:23:49 crc kubenswrapper[4903]: I1126 22:23:49.585075 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ed4c41af-65e0-4b74-834f-346c66d32263-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:23:50 crc kubenswrapper[4903]: I1126 22:23:50.105008 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ed4c41af-65e0-4b74-834f-346c66d32263","Type":"ContainerDied","Data":"c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795"} Nov 26 22:23:50 crc kubenswrapper[4903]: I1126 22:23:50.105042 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 22:23:50 crc kubenswrapper[4903]: I1126 22:23:50.105056 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5757d49c6f5f411a11926b15770e1f93d3c37b40ed5edca44ff79be3926a795" Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.211011 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.211667 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.211077 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.212239 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.216180 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:55 crc kubenswrapper[4903]: I1126 22:23:55.228384 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:23:56 crc 
kubenswrapper[4903]: I1126 22:23:56.994482 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:57 crc kubenswrapper[4903]: I1126 22:23:57.017356 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aef28737-00fd-4738-ae1f-e02a5b974905-metrics-certs\") pod \"network-metrics-daemon-q8dvw\" (UID: \"aef28737-00fd-4738-ae1f-e02a5b974905\") " pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:23:57 crc kubenswrapper[4903]: I1126 22:23:57.164857 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-q8dvw" Nov 26 22:24:01 crc kubenswrapper[4903]: I1126 22:24:01.981377 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:24:01 crc kubenswrapper[4903]: I1126 22:24:01.981438 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:24:04 crc kubenswrapper[4903]: I1126 22:24:04.683822 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.210515 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.210543 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.210599 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.210613 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.210684 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.211470 4903 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"a749a15d6277b58c284fefd718865f9694e4ab9e06f5f4e3df11246a51350aeb"} pod="openshift-console/downloads-7954f5f757-7qxf7" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.211612 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" containerID="cri-o://a749a15d6277b58c284fefd718865f9694e4ab9e06f5f4e3df11246a51350aeb" gracePeriod=2 Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.211661 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.211790 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.766989 4903 patch_prober.go:28] interesting pod/router-default-5444994796-lxr2h container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 22:24:05 crc kubenswrapper[4903]: I1126 22:24:05.767085 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-lxr2h" podUID="9ea9e718-d061-4176-b950-12497aeba908" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:24:06 crc kubenswrapper[4903]: I1126 22:24:06.202077 4903 generic.go:334] "Generic (PLEG): container finished" podID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerID="a749a15d6277b58c284fefd718865f9694e4ab9e06f5f4e3df11246a51350aeb" exitCode=0 Nov 26 22:24:06 crc kubenswrapper[4903]: I1126 22:24:06.202243 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7qxf7" event={"ID":"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a","Type":"ContainerDied","Data":"a749a15d6277b58c284fefd718865f9694e4ab9e06f5f4e3df11246a51350aeb"} Nov 26 22:24:14 crc kubenswrapper[4903]: I1126 22:24:14.957960 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-27m7h" Nov 26 22:24:15 crc kubenswrapper[4903]: I1126 22:24:15.212911 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Nov 26 22:24:15 crc kubenswrapper[4903]: I1126 22:24:15.213346 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" 
output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" Nov 26 22:24:15 crc kubenswrapper[4903]: E1126 22:24:15.718477 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 22:24:15 crc kubenswrapper[4903]: E1126 22:24:15.718779 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9z2pk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-6z5bc_openshift-marketplace(747780f1-ef94-4292-b306-4cce345c6a34): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 22:24:15 crc kubenswrapper[4903]: E1126 22:24:15.720053 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-6z5bc" podUID="747780f1-ef94-4292-b306-4cce345c6a34" Nov 26 22:24:18 crc kubenswrapper[4903]: E1126 22:24:18.463200 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-6z5bc" podUID="747780f1-ef94-4292-b306-4cce345c6a34" Nov 26 22:24:20 crc kubenswrapper[4903]: E1126 22:24:20.900139 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 22:24:20 crc kubenswrapper[4903]: E1126 22:24:20.901549 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
Nov 26 22:24:20 crc kubenswrapper[4903]: E1126 22:24:20.901549 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jksld,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rjqdj_openshift-marketplace(a054a1e6-36d5-4c7c-a520-ee213f7f36fa): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 22:24:20 crc kubenswrapper[4903]: E1126 22:24:20.904415 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rjqdj" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa"
Nov 26 22:24:21 crc kubenswrapper[4903]: I1126 22:24:21.150994 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 22:24:21 crc kubenswrapper[4903]: E1126 22:24:21.365321 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rjqdj" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.037767 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.037893 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p8lrz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-z4f6g_openshift-marketplace(ee178cda-7bde-4997-8554-e20c3548288b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.039741 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-z4f6g" podUID="ee178cda-7bde-4997-8554-e20c3548288b"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.604999 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-z4f6g" podUID="ee178cda-7bde-4997-8554-e20c3548288b"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.624515 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.624665 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jqckx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-kg68t_openshift-marketplace(ebb30834-3c49-4766-993b-d52693207694): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.626113 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-kg68t" podUID="ebb30834-3c49-4766-993b-d52693207694"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.662249 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.662415 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pflfd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-fttb4_openshift-marketplace(44dc303d-e9c6-4ced-9472-715779cd0dba): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 22:24:22 crc kubenswrapper[4903]: E1126 22:24:22.663609 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-fttb4" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba"
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.035315 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-q8dvw"]
Nov 26 22:24:23 crc kubenswrapper[4903]: W1126 22:24:23.039909 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaef28737_00fd_4738_ae1f_e02a5b974905.slice/crio-dbd344cfbe595d6267b21fe31c2294379e5dfca283c379a534094c790330bdb0 WatchSource:0}: Error finding container dbd344cfbe595d6267b21fe31c2294379e5dfca283c379a534094c790330bdb0: Status 404 returned error can't find the container with id dbd344cfbe595d6267b21fe31c2294379e5dfca283c379a534094c790330bdb0
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.317710 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7qxf7" event={"ID":"e9e3e3eb-d7d9-4495-bd83-4107f00ae04a","Type":"ContainerStarted","Data":"051e4e087d8af5b5a71ac4dc36d39b22211d12725f9ce83485db23e3680f09e8"}
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.318820 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7qxf7"
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.319418 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.319463 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.320655 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerStarted","Data":"c01eedb190a3ffaf225214cefde2ed5504df78e7d13165b5d865cb8309b6e8b7"}
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.322171 4903 generic.go:334] "Generic (PLEG): container finished" podID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerID="be74f9276aa959616140769767e3534adb5153e5e32f754e11a2c6e8bf63c604" exitCode=0
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.322212 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerDied","Data":"be74f9276aa959616140769767e3534adb5153e5e32f754e11a2c6e8bf63c604"}
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.332259 4903 generic.go:334] "Generic (PLEG): container finished" podID="e5cf807b-aa56-4158-90c9-519d5e076459" containerID="402a1565a24052e27c7ffe6fe1fa87d15e87c893aa85c75bdefc70ad679983eb" exitCode=0
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.332358 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerDied","Data":"402a1565a24052e27c7ffe6fe1fa87d15e87c893aa85c75bdefc70ad679983eb"}
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.337320 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" event={"ID":"aef28737-00fd-4738-ae1f-e02a5b974905","Type":"ContainerStarted","Data":"b7cf12d87641a568380e1e9922a06e87c9cc64fab1df4a5ae5dc69cf391db544"}
Nov 26 22:24:23 crc kubenswrapper[4903]: I1126 22:24:23.337349 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" event={"ID":"aef28737-00fd-4738-ae1f-e02a5b974905","Type":"ContainerStarted","Data":"dbd344cfbe595d6267b21fe31c2294379e5dfca283c379a534094c790330bdb0"}
Nov 26 22:24:23 crc kubenswrapper[4903]: E1126 22:24:23.339428 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-fttb4" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba"
Nov 26 22:24:23 crc kubenswrapper[4903]: E1126 22:24:23.339494 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-kg68t" podUID="ebb30834-3c49-4766-993b-d52693207694"
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.344631 4903 generic.go:334] "Generic (PLEG): container finished" podID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerID="c01eedb190a3ffaf225214cefde2ed5504df78e7d13165b5d865cb8309b6e8b7" exitCode=0
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.344724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerDied","Data":"c01eedb190a3ffaf225214cefde2ed5504df78e7d13165b5d865cb8309b6e8b7"}
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.354233 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerStarted","Data":"877d3d598f58d2025041fa714321ea3ca5267f434ffec70b6dd136b698eb0920"}
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.357347 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerStarted","Data":"6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f"}
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.361594 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-q8dvw" event={"ID":"aef28737-00fd-4738-ae1f-e02a5b974905","Type":"ContainerStarted","Data":"555fc74eb785461ba5909ebfcd66c24f599fef761cef07a55ed26b7de5ba93ae"}
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.361978 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.362010 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.385593 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ddms7" podStartSLOduration=3.245655356 podStartE2EDuration="45.385572973s" podCreationTimestamp="2025-11-26 22:23:39 +0000 UTC" firstStartedPulling="2025-11-26 22:23:41.864099802 +0000 UTC m=+150.554334712" lastFinishedPulling="2025-11-26 22:24:24.004017419 +0000 UTC m=+192.694252329" observedRunningTime="2025-11-26 22:24:24.382832018 +0000 UTC m=+193.073066958" watchObservedRunningTime="2025-11-26 22:24:24.385572973 +0000 UTC m=+193.075807893"
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.401046 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-q8dvw" podStartSLOduration=172.401025526 podStartE2EDuration="2m52.401025526s" podCreationTimestamp="2025-11-26 22:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:24:24.399889284 +0000 UTC m=+193.090124194" watchObservedRunningTime="2025-11-26 22:24:24.401025526 +0000 UTC m=+193.091260436"
Nov 26 22:24:24 crc kubenswrapper[4903]: I1126 22:24:24.430671 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mmswh" podStartSLOduration=4.452130644 podStartE2EDuration="46.430652116s" podCreationTimestamp="2025-11-26 22:23:38 +0000 UTC" firstStartedPulling="2025-11-26 22:23:41.849574465 +0000 UTC m=+150.539809375" lastFinishedPulling="2025-11-26 22:24:23.828095917 +0000 UTC m=+192.518330847" observedRunningTime="2025-11-26 22:24:24.427568282 +0000 UTC m=+193.117803202" watchObservedRunningTime="2025-11-26 22:24:24.430652116 +0000 UTC m=+193.120887036"
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.210783 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.211060 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.210879 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.211291 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.370055 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerStarted","Data":"d58030b35c1111aa31b38719cbbb02ee527c52d700d19f1cd1d6b6d7748eb3b6"}
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.370682 4903 patch_prober.go:28] interesting pod/downloads-7954f5f757-7qxf7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body=
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.370790 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7qxf7" podUID="e9e3e3eb-d7d9-4495-bd83-4107f00ae04a" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.26:8080/\": dial tcp 10.217.0.26:8080: connect: connection refused"
Nov 26 22:24:25 crc kubenswrapper[4903]: I1126 22:24:25.393095 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rz6wd" podStartSLOduration=2.480081024 podStartE2EDuration="43.393077087s" podCreationTimestamp="2025-11-26 22:23:42 +0000 UTC" firstStartedPulling="2025-11-26 22:23:43.980416589 +0000 UTC m=+152.670651539" lastFinishedPulling="2025-11-26 22:24:24.893412682 +0000 UTC m=+193.583647602" observedRunningTime="2025-11-26 22:24:25.389141359 +0000 UTC m=+194.079376279" watchObservedRunningTime="2025-11-26 22:24:25.393077087 +0000 UTC m=+194.083311997"
Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.188270 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mmswh"
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.577536 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.610864 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.611627 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.635658 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:24:29 crc kubenswrapper[4903]: I1126 22:24:29.699084 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:24:30 crc kubenswrapper[4903]: I1126 22:24:30.454546 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:24:31 crc kubenswrapper[4903]: I1126 22:24:31.562847 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:24:31 crc kubenswrapper[4903]: I1126 22:24:31.981510 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:24:31 crc kubenswrapper[4903]: I1126 22:24:31.981595 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:24:32 crc kubenswrapper[4903]: I1126 22:24:32.778072 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rz6wd" Nov 26 22:24:32 crc kubenswrapper[4903]: I1126 22:24:32.778575 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rz6wd" Nov 26 22:24:32 crc kubenswrapper[4903]: I1126 22:24:32.848625 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rz6wd" Nov 26 22:24:33 crc kubenswrapper[4903]: I1126 22:24:33.421049 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ddms7" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="registry-server" containerID="cri-o://6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f" gracePeriod=2 Nov 26 22:24:33 crc kubenswrapper[4903]: I1126 22:24:33.488836 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rz6wd" Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.430798 4903 generic.go:334] "Generic (PLEG): container finished" podID="e5cf807b-aa56-4158-90c9-519d5e076459" containerID="6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f" 
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.430798 4903 generic.go:334] "Generic (PLEG): container finished" podID="e5cf807b-aa56-4158-90c9-519d5e076459" containerID="6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f" exitCode=0
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.430945 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerDied","Data":"6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f"}
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.838121 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ddms7"
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.856934 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities\") pod \"e5cf807b-aa56-4158-90c9-519d5e076459\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") "
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.857305 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbwps\" (UniqueName: \"kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps\") pod \"e5cf807b-aa56-4158-90c9-519d5e076459\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") "
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.857384 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-catalog-content\") pod \"e5cf807b-aa56-4158-90c9-519d5e076459\" (UID: \"e5cf807b-aa56-4158-90c9-519d5e076459\") "
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.858063 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities" (OuterVolumeSpecName: "utilities") pod "e5cf807b-aa56-4158-90c9-519d5e076459" (UID: "e5cf807b-aa56-4158-90c9-519d5e076459"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.864943 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps" (OuterVolumeSpecName: "kube-api-access-bbwps") pod "e5cf807b-aa56-4158-90c9-519d5e076459" (UID: "e5cf807b-aa56-4158-90c9-519d5e076459"). InnerVolumeSpecName "kube-api-access-bbwps". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.958809 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.958842 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbwps\" (UniqueName: \"kubernetes.io/projected/e5cf807b-aa56-4158-90c9-519d5e076459-kube-api-access-bbwps\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:34 crc kubenswrapper[4903]: I1126 22:24:34.958857 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5cf807b-aa56-4158-90c9-519d5e076459-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.226357 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7qxf7" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.439156 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ddms7" event={"ID":"e5cf807b-aa56-4158-90c9-519d5e076459","Type":"ContainerDied","Data":"892a98652785fe10eb0ffb7bea99f78dce665653cef0dcc141bd0b71a755c52f"} Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.439214 4903 scope.go:117] "RemoveContainer" containerID="6bddeafd78befe74c5f3902c7d0aea374386b938fa143fcd8fd34ee152b6c76f" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.439236 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ddms7" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.484584 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.485647 4903 scope.go:117] "RemoveContainer" containerID="402a1565a24052e27c7ffe6fe1fa87d15e87c893aa85c75bdefc70ad679983eb" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.490950 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ddms7"] Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.508103 4903 scope.go:117] "RemoveContainer" containerID="b4915d22af8c264b10fcb353f18e137d766a2057cbd0aafaacc43c7a64dcfcf3" Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.765084 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"] Nov 26 22:24:35 crc kubenswrapper[4903]: I1126 22:24:35.765687 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rz6wd" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="registry-server" containerID="cri-o://d58030b35c1111aa31b38719cbbb02ee527c52d700d19f1cd1d6b6d7748eb3b6" gracePeriod=2 Nov 26 22:24:36 crc kubenswrapper[4903]: I1126 22:24:36.053260 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" path="/var/lib/kubelet/pods/e5cf807b-aa56-4158-90c9-519d5e076459/volumes" Nov 26 22:24:36 crc kubenswrapper[4903]: I1126 22:24:36.447478 4903 generic.go:334] "Generic (PLEG): container finished" podID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerID="d58030b35c1111aa31b38719cbbb02ee527c52d700d19f1cd1d6b6d7748eb3b6" exitCode=0 Nov 26 22:24:36 crc kubenswrapper[4903]: I1126 
Nov 26 22:24:36 crc kubenswrapper[4903]: I1126 22:24:36.447537 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerDied","Data":"d58030b35c1111aa31b38719cbbb02ee527c52d700d19f1cd1d6b6d7748eb3b6"}
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.540319 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rz6wd"
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.715759 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-catalog-content\") pod \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") "
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.715998 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86qnz\" (UniqueName: \"kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz\") pod \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") "
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.716282 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities\") pod \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\" (UID: \"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b\") "
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.717645 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities" (OuterVolumeSpecName: "utilities") pod "955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" (UID: "955a0ea6-4526-4530-a0b9-f56b6ed4ff0b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.723939 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz" (OuterVolumeSpecName: "kube-api-access-86qnz") pod "955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" (UID: "955a0ea6-4526-4530-a0b9-f56b6ed4ff0b"). InnerVolumeSpecName "kube-api-access-86qnz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.818060 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86qnz\" (UniqueName: \"kubernetes.io/projected/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-kube-api-access-86qnz\") on node \"crc\" DevicePath \"\""
Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.818595 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-utilities\") on node \"crc\" DevicePath \"\""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:38 crc kubenswrapper[4903]: I1126 22:24:38.920327 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.511931 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerStarted","Data":"e16f9c72b29623bb1b1af4a4ab9a273efacb7bc63bc2309839e394457eb888af"} Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.519462 4903 generic.go:334] "Generic (PLEG): container finished" podID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerID="2c4d63f057a54f3947500cae85f3cc393067c72477b0445c0017d9d3ddc0e036" exitCode=0 Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.519572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerDied","Data":"2c4d63f057a54f3947500cae85f3cc393067c72477b0445c0017d9d3ddc0e036"} Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.524054 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rz6wd" event={"ID":"955a0ea6-4526-4530-a0b9-f56b6ed4ff0b","Type":"ContainerDied","Data":"e81a22ae817001f6b7821e3d3792d0d43cbfa1112f6c969800683d5fbcb44ede"} Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.524152 4903 scope.go:117] "RemoveContainer" containerID="d58030b35c1111aa31b38719cbbb02ee527c52d700d19f1cd1d6b6d7748eb3b6" Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.524230 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rz6wd" Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.527187 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerStarted","Data":"2f32909fce861170978ee0a3b78bd88cca22e3f93ddce0a52ff1ef9eeb108efc"} Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.557252 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerStarted","Data":"6f2aa09cfba49dc89f7183a4b5a54238613aba625ff59cdd50967269817001c4"} Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.667536 4903 scope.go:117] "RemoveContainer" containerID="c01eedb190a3ffaf225214cefde2ed5504df78e7d13165b5d865cb8309b6e8b7" Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.674498 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"] Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.685405 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rz6wd"] Nov 26 22:24:39 crc kubenswrapper[4903]: I1126 22:24:39.698393 4903 scope.go:117] "RemoveContainer" containerID="3d3d1af2a8371d259291bdf597d033c61e0235db82cc25a456d4fe836e7ca467" Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.035772 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" path="/var/lib/kubelet/pods/955a0ea6-4526-4530-a0b9-f56b6ed4ff0b/volumes" Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.567093 4903 generic.go:334] "Generic (PLEG): container finished" podID="ee178cda-7bde-4997-8554-e20c3548288b" containerID="6f2aa09cfba49dc89f7183a4b5a54238613aba625ff59cdd50967269817001c4" exitCode=0 Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.567194 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerDied","Data":"6f2aa09cfba49dc89f7183a4b5a54238613aba625ff59cdd50967269817001c4"} Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.570040 4903 generic.go:334] "Generic (PLEG): container finished" podID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerID="2f32909fce861170978ee0a3b78bd88cca22e3f93ddce0a52ff1ef9eeb108efc" exitCode=0 Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.570094 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerDied","Data":"2f32909fce861170978ee0a3b78bd88cca22e3f93ddce0a52ff1ef9eeb108efc"} Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.572638 4903 generic.go:334] "Generic (PLEG): container finished" podID="747780f1-ef94-4292-b306-4cce345c6a34" containerID="e16f9c72b29623bb1b1af4a4ab9a273efacb7bc63bc2309839e394457eb888af" exitCode=0 Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.572677 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerDied","Data":"e16f9c72b29623bb1b1af4a4ab9a273efacb7bc63bc2309839e394457eb888af"} Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.575331 4903 generic.go:334] "Generic (PLEG): container finished" 
podID="ebb30834-3c49-4766-993b-d52693207694" containerID="cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8" exitCode=0 Nov 26 22:24:40 crc kubenswrapper[4903]: I1126 22:24:40.575381 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerDied","Data":"cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.591944 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerStarted","Data":"8edf5dd0cd0cfba11e4e07cc416fa46448c1c593659d1205939913a4180c5d36"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.593600 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerStarted","Data":"9216c2776a6e1ec3ea54bb4f49398a0bda53da3b6e3f5124d911e0a68ff69ec3"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.596288 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerStarted","Data":"fe6464e628c8e9fac3d7efbbb4f3237c856cc516e8cc76454834d44333b83069"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.597950 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerStarted","Data":"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.599693 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerStarted","Data":"7eec9fcc99ae3ef8da1cecd5c07021d094c406f8b4d5cf36c20e4e53fc93afc5"} Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.613974 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z4f6g" podStartSLOduration=3.624342188 podStartE2EDuration="1m3.613958188s" podCreationTimestamp="2025-11-26 22:23:39 +0000 UTC" firstStartedPulling="2025-11-26 22:23:41.809468438 +0000 UTC m=+150.499703348" lastFinishedPulling="2025-11-26 22:24:41.799084408 +0000 UTC m=+210.489319348" observedRunningTime="2025-11-26 22:24:42.611533711 +0000 UTC m=+211.301768621" watchObservedRunningTime="2025-11-26 22:24:42.613958188 +0000 UTC m=+211.304193098" Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.642111 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kg68t" podStartSLOduration=3.717649514 podStartE2EDuration="1m1.642092697s" podCreationTimestamp="2025-11-26 22:23:41 +0000 UTC" firstStartedPulling="2025-11-26 22:23:44.004032745 +0000 UTC m=+152.694267685" lastFinishedPulling="2025-11-26 22:24:41.928475948 +0000 UTC m=+210.618710868" observedRunningTime="2025-11-26 22:24:42.627057327 +0000 UTC m=+211.317292237" watchObservedRunningTime="2025-11-26 22:24:42.642092697 +0000 UTC m=+211.332327607" Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.643424 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rjqdj" 
Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.643424 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rjqdj" podStartSLOduration=2.6056626720000002 podStartE2EDuration="1m0.643418674s" podCreationTimestamp="2025-11-26 22:23:42 +0000 UTC" firstStartedPulling="2025-11-26 22:23:43.994794513 +0000 UTC m=+152.685029463" lastFinishedPulling="2025-11-26 22:24:42.032550555 +0000 UTC m=+210.722785465" observedRunningTime="2025-11-26 22:24:42.640268448 +0000 UTC m=+211.330503358" watchObservedRunningTime="2025-11-26 22:24:42.643418674 +0000 UTC m=+211.333653584"
Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.661190 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fttb4" podStartSLOduration=2.804943935 podStartE2EDuration="1m1.66117579s" podCreationTimestamp="2025-11-26 22:23:41 +0000 UTC" firstStartedPulling="2025-11-26 22:23:42.965956036 +0000 UTC m=+151.656190946" lastFinishedPulling="2025-11-26 22:24:41.822187871 +0000 UTC m=+210.512422801" observedRunningTime="2025-11-26 22:24:42.658534887 +0000 UTC m=+211.348769807" watchObservedRunningTime="2025-11-26 22:24:42.66117579 +0000 UTC m=+211.351410700"
Nov 26 22:24:42 crc kubenswrapper[4903]: I1126 22:24:42.675268 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6z5bc" podStartSLOduration=3.471656301 podStartE2EDuration="1m3.675252155s" podCreationTimestamp="2025-11-26 22:23:39 +0000 UTC" firstStartedPulling="2025-11-26 22:23:41.821196388 +0000 UTC m=+150.511431298" lastFinishedPulling="2025-11-26 22:24:42.024792232 +0000 UTC m=+210.715027152" observedRunningTime="2025-11-26 22:24:42.673772294 +0000 UTC m=+211.364007204" watchObservedRunningTime="2025-11-26 22:24:42.675252155 +0000 UTC m=+211.365487065"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.375038 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z4f6g"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.375486 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z4f6g"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.414267 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z4f6g"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.666784 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z4f6g"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.819320 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6z5bc"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.819384 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6z5bc"
Nov 26 22:24:49 crc kubenswrapper[4903]: I1126 22:24:49.874132 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6z5bc"
Nov 26 22:24:50 crc kubenswrapper[4903]: I1126 22:24:50.695331 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6z5bc"
Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.160163 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"]
pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.558476 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.602495 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.681121 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.945741 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.945800 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:51 crc kubenswrapper[4903]: I1126 22:24:51.993184 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.284890 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.284951 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.324734 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.661768 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6z5bc" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="registry-server" containerID="cri-o://fe6464e628c8e9fac3d7efbbb4f3237c856cc516e8cc76454834d44333b83069" gracePeriod=2 Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.708487 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:52 crc kubenswrapper[4903]: I1126 22:24:52.710856 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.668238 4903 generic.go:334] "Generic (PLEG): container finished" podID="747780f1-ef94-4292-b306-4cce345c6a34" containerID="fe6464e628c8e9fac3d7efbbb4f3237c856cc516e8cc76454834d44333b83069" exitCode=0 Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.668338 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerDied","Data":"fe6464e628c8e9fac3d7efbbb4f3237c856cc516e8cc76454834d44333b83069"} Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.904212 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.937954 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content\") pod \"747780f1-ef94-4292-b306-4cce345c6a34\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.938048 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities\") pod \"747780f1-ef94-4292-b306-4cce345c6a34\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.938119 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z2pk\" (UniqueName: \"kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk\") pod \"747780f1-ef94-4292-b306-4cce345c6a34\" (UID: \"747780f1-ef94-4292-b306-4cce345c6a34\") " Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.938948 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities" (OuterVolumeSpecName: "utilities") pod "747780f1-ef94-4292-b306-4cce345c6a34" (UID: "747780f1-ef94-4292-b306-4cce345c6a34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.945026 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk" (OuterVolumeSpecName: "kube-api-access-9z2pk") pod "747780f1-ef94-4292-b306-4cce345c6a34" (UID: "747780f1-ef94-4292-b306-4cce345c6a34"). InnerVolumeSpecName "kube-api-access-9z2pk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:24:53 crc kubenswrapper[4903]: I1126 22:24:53.972197 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"] Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.020500 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "747780f1-ef94-4292-b306-4cce345c6a34" (UID: "747780f1-ef94-4292-b306-4cce345c6a34"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.039799 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.039825 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/747780f1-ef94-4292-b306-4cce345c6a34-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.039835 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9z2pk\" (UniqueName: \"kubernetes.io/projected/747780f1-ef94-4292-b306-4cce345c6a34-kube-api-access-9z2pk\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.073273 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.676440 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6z5bc" event={"ID":"747780f1-ef94-4292-b306-4cce345c6a34","Type":"ContainerDied","Data":"ff2fe7588507129e55c8c6241632e80a5dec4907e58a49c43a7985eeacacfbe2"} Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.676722 4903 scope.go:117] "RemoveContainer" containerID="fe6464e628c8e9fac3d7efbbb4f3237c856cc516e8cc76454834d44333b83069" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.676510 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6z5bc" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.676578 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kg68t" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="registry-server" containerID="cri-o://73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b" gracePeriod=2 Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.694954 4903 scope.go:117] "RemoveContainer" containerID="e16f9c72b29623bb1b1af4a4ab9a273efacb7bc63bc2309839e394457eb888af" Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.703283 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"] Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.711998 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6z5bc"] Nov 26 22:24:54 crc kubenswrapper[4903]: I1126 22:24:54.737856 4903 scope.go:117] "RemoveContainer" containerID="c0bb96ed33c5d38c871a39b59f2d23e9740d5f774db9fbdbaa77522cce34b467" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.230072 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.253620 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqckx\" (UniqueName: \"kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx\") pod \"ebb30834-3c49-4766-993b-d52693207694\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.253744 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content\") pod \"ebb30834-3c49-4766-993b-d52693207694\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.253775 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities\") pod \"ebb30834-3c49-4766-993b-d52693207694\" (UID: \"ebb30834-3c49-4766-993b-d52693207694\") " Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.254476 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities" (OuterVolumeSpecName: "utilities") pod "ebb30834-3c49-4766-993b-d52693207694" (UID: "ebb30834-3c49-4766-993b-d52693207694"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.258528 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx" (OuterVolumeSpecName: "kube-api-access-jqckx") pod "ebb30834-3c49-4766-993b-d52693207694" (UID: "ebb30834-3c49-4766-993b-d52693207694"). InnerVolumeSpecName "kube-api-access-jqckx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.269565 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ebb30834-3c49-4766-993b-d52693207694" (UID: "ebb30834-3c49-4766-993b-d52693207694"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.355084 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.355116 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebb30834-3c49-4766-993b-d52693207694-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.355129 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqckx\" (UniqueName: \"kubernetes.io/projected/ebb30834-3c49-4766-993b-d52693207694-kube-api-access-jqckx\") on node \"crc\" DevicePath \"\"" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.684636 4903 generic.go:334] "Generic (PLEG): container finished" podID="ebb30834-3c49-4766-993b-d52693207694" containerID="73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b" exitCode=0 Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.684678 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerDied","Data":"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b"} Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.684717 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kg68t" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.684736 4903 scope.go:117] "RemoveContainer" containerID="73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.684726 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kg68t" event={"ID":"ebb30834-3c49-4766-993b-d52693207694","Type":"ContainerDied","Data":"f2b7894dc34ddf310ecba74466508af39455e5d9567e330366831db49427fa54"} Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.700114 4903 scope.go:117] "RemoveContainer" containerID="cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.713219 4903 scope.go:117] "RemoveContainer" containerID="fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.713423 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"] Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.716122 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kg68t"] Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.742281 4903 scope.go:117] "RemoveContainer" containerID="73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b" Nov 26 22:24:55 crc kubenswrapper[4903]: E1126 22:24:55.742733 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b\": container with ID starting with 73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b not found: ID does not exist" containerID="73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.742786 4903 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b"} err="failed to get container status \"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b\": rpc error: code = NotFound desc = could not find container \"73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b\": container with ID starting with 73521c314daedcf2e8b2a28e081f70332dbf2411e824a8e81434166a4166ee1b not found: ID does not exist" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.742818 4903 scope.go:117] "RemoveContainer" containerID="cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8" Nov 26 22:24:55 crc kubenswrapper[4903]: E1126 22:24:55.743208 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8\": container with ID starting with cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8 not found: ID does not exist" containerID="cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.743262 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8"} err="failed to get container status \"cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8\": rpc error: code = NotFound desc = could not find container \"cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8\": container with ID starting with cf806f267dda887af16faac73f0314912fa6cc88d6244cbba5303f99027ce1d8 not found: ID does not exist" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.743297 4903 scope.go:117] "RemoveContainer" containerID="fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca" Nov 26 22:24:55 crc kubenswrapper[4903]: E1126 22:24:55.743829 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca\": container with ID starting with fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca not found: ID does not exist" containerID="fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca" Nov 26 22:24:55 crc kubenswrapper[4903]: I1126 22:24:55.743865 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca"} err="failed to get container status \"fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca\": rpc error: code = NotFound desc = could not find container \"fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca\": container with ID starting with fc9ea7966eb710c621e7aecdfc48cb7b370d111204d7b47959f3cd70ca22d4ca not found: ID does not exist" Nov 26 22:24:56 crc kubenswrapper[4903]: I1126 22:24:56.034654 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="747780f1-ef94-4292-b306-4cce345c6a34" path="/var/lib/kubelet/pods/747780f1-ef94-4292-b306-4cce345c6a34/volumes" Nov 26 22:24:56 crc kubenswrapper[4903]: I1126 22:24:56.035337 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebb30834-3c49-4766-993b-d52693207694" path="/var/lib/kubelet/pods/ebb30834-3c49-4766-993b-d52693207694/volumes" Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 
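[editor's note] The NotFound error pairs above are benign: the kubelet retries RemoveContainer for IDs the runtime has already garbage-collected, and a NotFound status means the desired end state (container gone) already holds. A hedged sketch of that idempotent-delete pattern; the runtime type here is illustrative, not the real CRI client:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stands in for a gRPC NotFound status

type runtime struct{ containers map[string]bool }

func (r *runtime) remove(id string) error {
	if !r.containers[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	delete(r.containers, id)
	return nil
}

// removeContainer treats "already gone" as success, so repeated cleanup
// passes over the same ID converge instead of failing.
func removeContainer(r *runtime, id string) error {
	if err := r.remove(id); err != nil {
		if errors.Is(err, errNotFound) {
			fmt.Printf("container %q already removed, ignoring\n", id)
			return nil
		}
		return err
	}
	fmt.Printf("container %q removed\n", id)
	return nil
}

func main() {
	r := &runtime{containers: map[string]bool{"73521c31": true}}
	_ = removeContainer(r, "73521c31") // removes it
	_ = removeContainer(r, "73521c31") // NotFound, tolerated
}
```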
Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 22:25:01.981967 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 22:25:01.982627 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 22:25:01.982727 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 22:25:01.983552 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 22:25:01 crc kubenswrapper[4903]: I1126 22:25:01.983645 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740" gracePeriod=600
Nov 26 22:25:02 crc kubenswrapper[4903]: I1126 22:25:02.733082 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740" exitCode=0
Nov 26 22:25:02 crc kubenswrapper[4903]: I1126 22:25:02.733221 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740"}
Nov 26 22:25:02 crc kubenswrapper[4903]: I1126 22:25:02.733361 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37"}
Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.102734 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" podUID="fcc90cdd-595c-4d40-908e-12b1586dfd43" containerName="oauth-openshift" containerID="cri-o://865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b" gracePeriod=15
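[editor's note] The sequence above is the standard liveness flow: an HTTP GET against the container's health endpoint is refused, the probe result flips to failure, and the container is killed with its termination grace period so it can be restarted. A minimal sketch of that loop, assuming (illustratively) a failure threshold of 3 and the endpoint seen in the log:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeLoop polls url every period; after threshold consecutive failures
// it calls restart(), mirroring "failed liveness probe, will be restarted".
// The threshold and period are illustrative defaults, not read from a pod spec.
func probeLoop(url string, period time.Duration, threshold int, restart func()) {
	client := &http.Client{Timeout: time.Second}
	failures := 0
	for {
		resp, err := client.Get(url)
		healthy := err == nil && resp.StatusCode < 400
		if resp != nil {
			resp.Body.Close()
		}
		if healthy {
			failures = 0
		} else {
			failures++
			fmt.Printf("Probe failed (%d/%d): %v\n", failures, threshold, err)
			if failures >= threshold {
				restart()
				failures = 0
			}
		}
		time.Sleep(period)
	}
}

func main() {
	probeLoop("http://127.0.0.1:8798/health", 10*time.Second, 3, func() {
		fmt.Println("killing container with a grace period; it will be restarted")
	})
}
```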
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.560587 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-8f56ccf5-6jgrk"] Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.560980 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561016 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561035 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a6cd498-d18b-4c56-a1ab-1b8e202690be" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561048 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a6cd498-d18b-4c56-a1ab-1b8e202690be" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561066 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561079 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561096 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561107 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561125 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561137 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561156 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561170 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561184 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561196 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561212 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561224 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561244 4903 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561257 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561274 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561288 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="extract-utilities" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561301 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561313 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561330 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcc90cdd-595c-4d40-908e-12b1586dfd43" containerName="oauth-openshift" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561342 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcc90cdd-595c-4d40-908e-12b1586dfd43" containerName="oauth-openshift" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561359 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed4c41af-65e0-4b74-834f-346c66d32263" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561371 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed4c41af-65e0-4b74-834f-346c66d32263" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561385 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561396 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="extract-content" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.561416 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561428 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561621 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a6cd498-d18b-4c56-a1ab-1b8e202690be" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561647 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="955a0ea6-4526-4530-a0b9-f56b6ed4ff0b" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561666 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebb30834-3c49-4766-993b-d52693207694" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561684 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed4c41af-65e0-4b74-834f-346c66d32263" containerName="pruner" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561736 4903 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="747780f1-ef94-4292-b306-4cce345c6a34" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561760 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcc90cdd-595c-4d40-908e-12b1586dfd43" containerName="oauth-openshift" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.561789 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5cf807b-aa56-4158-90c9-519d5e076459" containerName="registry-server" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.562413 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.576688 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-8f56ccf5-6jgrk"] Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650182 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650214 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650274 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650315 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650351 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650398 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: 
\"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650442 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54pb4\" (UniqueName: \"kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650477 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650518 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650542 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650570 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650594 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650636 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login\") pod \"fcc90cdd-595c-4d40-908e-12b1586dfd43\" (UID: \"fcc90cdd-595c-4d40-908e-12b1586dfd43\") " Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650847 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650896 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: 
\"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650929 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.650983 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-dir\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651014 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651051 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-login\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651083 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-error\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651116 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651153 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-session\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651199 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-service-ca\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651234 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-policies\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651267 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651285 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651325 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-router-certs\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651541 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtv5z\" (UniqueName: \"kubernetes.io/projected/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-kube-api-access-jtv5z\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651578 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651645 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651682 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.651819 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.652188 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.659291 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4" (OuterVolumeSpecName: "kube-api-access-54pb4") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "kube-api-access-54pb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.659511 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.659815 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.660304 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.660857 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.661128 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.664010 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.664314 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.664890 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "fcc90cdd-595c-4d40-908e-12b1586dfd43" (UID: "fcc90cdd-595c-4d40-908e-12b1586dfd43"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752473 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-router-certs\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752541 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtv5z\" (UniqueName: \"kubernetes.io/projected/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-kube-api-access-jtv5z\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752593 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752644 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752736 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752779 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-dir\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752820 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-login\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 
22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752922 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-error\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.752963 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753013 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-session\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753067 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-service-ca\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-policies\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753137 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753236 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753259 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753283 4903 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753306 4903 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753325 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753350 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753370 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753389 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753412 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753433 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753453 4903 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fcc90cdd-595c-4d40-908e-12b1586dfd43-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753476 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54pb4\" (UniqueName: \"kubernetes.io/projected/fcc90cdd-595c-4d40-908e-12b1586dfd43-kube-api-access-54pb4\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753497 4903 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fcc90cdd-595c-4d40-908e-12b1586dfd43-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.753934 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-dir\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.754684 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-cliconfig\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.756812 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-audit-policies\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.757495 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.757637 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-service-ca\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.759120 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.759752 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-login\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.759817 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-router-certs\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.760763 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-session\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.760871 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-idp-0-file-data\") pod 
\"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.761338 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-user-template-error\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.762016 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-serving-cert\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.762764 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.785422 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtv5z\" (UniqueName: \"kubernetes.io/projected/7616c4bb-aecf-4f50-8354-8bbb9103fbe9-kube-api-access-jtv5z\") pod \"oauth-openshift-8f56ccf5-6jgrk\" (UID: \"7616c4bb-aecf-4f50-8354-8bbb9103fbe9\") " pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.859813 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcc90cdd-595c-4d40-908e-12b1586dfd43" containerID="865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b" exitCode=0 Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.859893 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" event={"ID":"fcc90cdd-595c-4d40-908e-12b1586dfd43","Type":"ContainerDied","Data":"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b"} Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.859932 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.859957 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-b7nk4" event={"ID":"fcc90cdd-595c-4d40-908e-12b1586dfd43","Type":"ContainerDied","Data":"0c7bf9ed2afa4bfc6a62a925a3f6a9aefe2fa29e7da4c94862fd9551f0ddafb5"} Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.859997 4903 scope.go:117] "RemoveContainer" containerID="865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.891004 4903 scope.go:117] "RemoveContainer" containerID="865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b" Nov 26 22:25:19 crc kubenswrapper[4903]: E1126 22:25:19.891575 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b\": container with ID starting with 865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b not found: ID does not exist" containerID="865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.891630 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b"} err="failed to get container status \"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b\": rpc error: code = NotFound desc = could not find container \"865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b\": container with ID starting with 865a0040345d63e5234a54e30d78cce93cdf13dc4e2afa2e1f330b4c50cb8b9b not found: ID does not exist" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.908131 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.915466 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:25:19 crc kubenswrapper[4903]: I1126 22:25:19.920880 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-b7nk4"] Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.041488 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcc90cdd-595c-4d40-908e-12b1586dfd43" path="/var/lib/kubelet/pods/fcc90cdd-595c-4d40-908e-12b1586dfd43/volumes" Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.184510 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-8f56ccf5-6jgrk"] Nov 26 22:25:20 crc kubenswrapper[4903]: W1126 22:25:20.192042 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7616c4bb_aecf_4f50_8354_8bbb9103fbe9.slice/crio-463020c66a316c7bccf51bad1cbd4b6e0bb8d3a845a1ba394add379e4b40f4c5 WatchSource:0}: Error finding container 463020c66a316c7bccf51bad1cbd4b6e0bb8d3a845a1ba394add379e4b40f4c5: Status 404 returned error can't find the container with id 463020c66a316c7bccf51bad1cbd4b6e0bb8d3a845a1ba394add379e4b40f4c5 Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.877276 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" event={"ID":"7616c4bb-aecf-4f50-8354-8bbb9103fbe9","Type":"ContainerStarted","Data":"021edfc56d6d89f318316126cebe8124469d726fed7767a8a20f4e2a08d2c20a"} Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.877362 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" event={"ID":"7616c4bb-aecf-4f50-8354-8bbb9103fbe9","Type":"ContainerStarted","Data":"463020c66a316c7bccf51bad1cbd4b6e0bb8d3a845a1ba394add379e4b40f4c5"} Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.877964 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.886056 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" Nov 26 22:25:20 crc kubenswrapper[4903]: I1126 22:25:20.912841 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-8f56ccf5-6jgrk" podStartSLOduration=26.912810968 podStartE2EDuration="26.912810968s" podCreationTimestamp="2025-11-26 22:24:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:25:20.906745112 +0000 UTC m=+249.596980062" watchObservedRunningTime="2025-11-26 22:25:20.912810968 +0000 UTC m=+249.603045908" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.673210 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.674115 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z4f6g" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="registry-server" 
containerID="cri-o://8edf5dd0cd0cfba11e4e07cc416fa46448c1c593659d1205939913a4180c5d36" gracePeriod=30 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.680318 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.681463 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mmswh" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="registry-server" containerID="cri-o://877d3d598f58d2025041fa714321ea3ca5267f434ffec70b6dd136b698eb0920" gracePeriod=30 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.684678 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.684913 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" containerID="cri-o://e2850e2da8798f8cf1457ac6b6f84fd5bf2b605bf4671f51ace50c3bd71f6c0f" gracePeriod=30 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.708060 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.708412 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fttb4" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="registry-server" containerID="cri-o://7eec9fcc99ae3ef8da1cecd5c07021d094c406f8b4d5cf36c20e4e53fc93afc5" gracePeriod=30 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.721099 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.721402 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rjqdj" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="registry-server" containerID="cri-o://9216c2776a6e1ec3ea54bb4f49398a0bda53da3b6e3f5124d911e0a68ff69ec3" gracePeriod=30 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.724266 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f9lv7"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.727090 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.740412 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f9lv7"] Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.866104 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.866239 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65pmc\" (UniqueName: \"kubernetes.io/projected/ad2be713-f117-46a7-a491-d75a9564cd48-kube-api-access-65pmc\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.866275 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.957378 4903 generic.go:334] "Generic (PLEG): container finished" podID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerID="877d3d598f58d2025041fa714321ea3ca5267f434ffec70b6dd136b698eb0920" exitCode=0 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.957416 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerDied","Data":"877d3d598f58d2025041fa714321ea3ca5267f434ffec70b6dd136b698eb0920"} Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.958553 4903 generic.go:334] "Generic (PLEG): container finished" podID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerID="e2850e2da8798f8cf1457ac6b6f84fd5bf2b605bf4671f51ace50c3bd71f6c0f" exitCode=0 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.958596 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" event={"ID":"2e6e49c1-f210-4ee8-af41-9e43123ae910","Type":"ContainerDied","Data":"e2850e2da8798f8cf1457ac6b6f84fd5bf2b605bf4671f51ace50c3bd71f6c0f"} Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.960288 4903 generic.go:334] "Generic (PLEG): container finished" podID="ee178cda-7bde-4997-8554-e20c3548288b" containerID="8edf5dd0cd0cfba11e4e07cc416fa46448c1c593659d1205939913a4180c5d36" exitCode=0 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.960338 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerDied","Data":"8edf5dd0cd0cfba11e4e07cc416fa46448c1c593659d1205939913a4180c5d36"} Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.962210 4903 generic.go:334] "Generic (PLEG): container finished" podID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" 
containerID="9216c2776a6e1ec3ea54bb4f49398a0bda53da3b6e3f5124d911e0a68ff69ec3" exitCode=0 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.962244 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerDied","Data":"9216c2776a6e1ec3ea54bb4f49398a0bda53da3b6e3f5124d911e0a68ff69ec3"} Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.965031 4903 generic.go:334] "Generic (PLEG): container finished" podID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerID="7eec9fcc99ae3ef8da1cecd5c07021d094c406f8b4d5cf36c20e4e53fc93afc5" exitCode=0 Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.965048 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerDied","Data":"7eec9fcc99ae3ef8da1cecd5c07021d094c406f8b4d5cf36c20e4e53fc93afc5"} Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.966912 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65pmc\" (UniqueName: \"kubernetes.io/projected/ad2be713-f117-46a7-a491-d75a9564cd48-kube-api-access-65pmc\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.966946 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.966971 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.968115 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.973015 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ad2be713-f117-46a7-a491-d75a9564cd48-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:33 crc kubenswrapper[4903]: I1126 22:25:33.981314 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65pmc\" (UniqueName: \"kubernetes.io/projected/ad2be713-f117-46a7-a491-d75a9564cd48-kube-api-access-65pmc\") pod \"marketplace-operator-79b997595-f9lv7\" (UID: \"ad2be713-f117-46a7-a491-d75a9564cd48\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.116310 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.128415 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.141172 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.148190 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.176280 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.209913 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274112 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content\") pod \"ee178cda-7bde-4997-8554-e20c3548288b\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274146 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities\") pod \"ee178cda-7bde-4997-8554-e20c3548288b\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274170 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities\") pod \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274202 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics\") pod \"2e6e49c1-f210-4ee8-af41-9e43123ae910\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274225 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities\") pod \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8lrz\" (UniqueName: \"kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz\") pod \"ee178cda-7bde-4997-8554-e20c3548288b\" (UID: \"ee178cda-7bde-4997-8554-e20c3548288b\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274280 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-jksld\" (UniqueName: \"kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld\") pod \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274300 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca\") pod \"2e6e49c1-f210-4ee8-af41-9e43123ae910\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274329 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nj7p\" (UniqueName: \"kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p\") pod \"2e6e49c1-f210-4ee8-af41-9e43123ae910\" (UID: \"2e6e49c1-f210-4ee8-af41-9e43123ae910\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274367 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content\") pod \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\" (UID: \"a054a1e6-36d5-4c7c-a520-ee213f7f36fa\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274403 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4dd7\" (UniqueName: \"kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7\") pod \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.274422 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content\") pod \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\" (UID: \"7717a36b-65f8-4c9a-b4be-5ab83fe77c99\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.275218 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "2e6e49c1-f210-4ee8-af41-9e43123ae910" (UID: "2e6e49c1-f210-4ee8-af41-9e43123ae910"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.275778 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities" (OuterVolumeSpecName: "utilities") pod "ee178cda-7bde-4997-8554-e20c3548288b" (UID: "ee178cda-7bde-4997-8554-e20c3548288b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.276053 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities" (OuterVolumeSpecName: "utilities") pod "7717a36b-65f8-4c9a-b4be-5ab83fe77c99" (UID: "7717a36b-65f8-4c9a-b4be-5ab83fe77c99"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.276438 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.276461 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.276506 4903 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.276535 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities" (OuterVolumeSpecName: "utilities") pod "a054a1e6-36d5-4c7c-a520-ee213f7f36fa" (UID: "a054a1e6-36d5-4c7c-a520-ee213f7f36fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.278777 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7" (OuterVolumeSpecName: "kube-api-access-v4dd7") pod "7717a36b-65f8-4c9a-b4be-5ab83fe77c99" (UID: "7717a36b-65f8-4c9a-b4be-5ab83fe77c99"). InnerVolumeSpecName "kube-api-access-v4dd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.278928 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz" (OuterVolumeSpecName: "kube-api-access-p8lrz") pod "ee178cda-7bde-4997-8554-e20c3548288b" (UID: "ee178cda-7bde-4997-8554-e20c3548288b"). InnerVolumeSpecName "kube-api-access-p8lrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.278983 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p" (OuterVolumeSpecName: "kube-api-access-4nj7p") pod "2e6e49c1-f210-4ee8-af41-9e43123ae910" (UID: "2e6e49c1-f210-4ee8-af41-9e43123ae910"). InnerVolumeSpecName "kube-api-access-4nj7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.279113 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld" (OuterVolumeSpecName: "kube-api-access-jksld") pod "a054a1e6-36d5-4c7c-a520-ee213f7f36fa" (UID: "a054a1e6-36d5-4c7c-a520-ee213f7f36fa"). InnerVolumeSpecName "kube-api-access-jksld". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.279636 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "2e6e49c1-f210-4ee8-af41-9e43123ae910" (UID: "2e6e49c1-f210-4ee8-af41-9e43123ae910"). 
InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.348069 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee178cda-7bde-4997-8554-e20c3548288b" (UID: "ee178cda-7bde-4997-8554-e20c3548288b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.349361 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7717a36b-65f8-4c9a-b4be-5ab83fe77c99" (UID: "7717a36b-65f8-4c9a-b4be-5ab83fe77c99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.377818 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content\") pod \"44dc303d-e9c6-4ced-9472-715779cd0dba\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.377875 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities\") pod \"44dc303d-e9c6-4ced-9472-715779cd0dba\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.377902 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pflfd\" (UniqueName: \"kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd\") pod \"44dc303d-e9c6-4ced-9472-715779cd0dba\" (UID: \"44dc303d-e9c6-4ced-9472-715779cd0dba\") " Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378130 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jksld\" (UniqueName: \"kubernetes.io/projected/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-kube-api-access-jksld\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378141 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nj7p\" (UniqueName: \"kubernetes.io/projected/2e6e49c1-f210-4ee8-af41-9e43123ae910-kube-api-access-4nj7p\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378150 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4dd7\" (UniqueName: \"kubernetes.io/projected/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-kube-api-access-v4dd7\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378159 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7717a36b-65f8-4c9a-b4be-5ab83fe77c99-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378168 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee178cda-7bde-4997-8554-e20c3548288b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378176 4903 reconciler_common.go:293] "Volume 
detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378185 4903 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2e6e49c1-f210-4ee8-af41-9e43123ae910-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.378195 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8lrz\" (UniqueName: \"kubernetes.io/projected/ee178cda-7bde-4997-8554-e20c3548288b-kube-api-access-p8lrz\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.379020 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities" (OuterVolumeSpecName: "utilities") pod "44dc303d-e9c6-4ced-9472-715779cd0dba" (UID: "44dc303d-e9c6-4ced-9472-715779cd0dba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.381703 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd" (OuterVolumeSpecName: "kube-api-access-pflfd") pod "44dc303d-e9c6-4ced-9472-715779cd0dba" (UID: "44dc303d-e9c6-4ced-9472-715779cd0dba"). InnerVolumeSpecName "kube-api-access-pflfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.403004 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44dc303d-e9c6-4ced-9472-715779cd0dba" (UID: "44dc303d-e9c6-4ced-9472-715779cd0dba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.426652 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a054a1e6-36d5-4c7c-a520-ee213f7f36fa" (UID: "a054a1e6-36d5-4c7c-a520-ee213f7f36fa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.479468 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.479495 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dc303d-e9c6-4ced-9472-715779cd0dba-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.479505 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a054a1e6-36d5-4c7c-a520-ee213f7f36fa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.479514 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pflfd\" (UniqueName: \"kubernetes.io/projected/44dc303d-e9c6-4ced-9472-715779cd0dba-kube-api-access-pflfd\") on node \"crc\" DevicePath \"\"" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.544559 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-f9lv7"] Nov 26 22:25:34 crc kubenswrapper[4903]: W1126 22:25:34.547958 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad2be713_f117_46a7_a491_d75a9564cd48.slice/crio-164278f15213c61669967579491dbea67e4792fa55f5dd71e8dba37752935a4d WatchSource:0}: Error finding container 164278f15213c61669967579491dbea67e4792fa55f5dd71e8dba37752935a4d: Status 404 returned error can't find the container with id 164278f15213c61669967579491dbea67e4792fa55f5dd71e8dba37752935a4d Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.971391 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rjqdj" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.971394 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rjqdj" event={"ID":"a054a1e6-36d5-4c7c-a520-ee213f7f36fa","Type":"ContainerDied","Data":"6dcda3aca602a3c55780b3e002247af081476858ab88e9da403e57fc97c878df"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.971650 4903 scope.go:117] "RemoveContainer" containerID="9216c2776a6e1ec3ea54bb4f49398a0bda53da3b6e3f5124d911e0a68ff69ec3" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.974440 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fttb4" event={"ID":"44dc303d-e9c6-4ced-9472-715779cd0dba","Type":"ContainerDied","Data":"a722fac1484ae4dc823ec8b1d0f3d02a20429fdcd8ffbefbb7f310fc3b721e4d"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.974597 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fttb4" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.975789 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" event={"ID":"ad2be713-f117-46a7-a491-d75a9564cd48","Type":"ContainerStarted","Data":"7084fda8324d1bcfbc10daf70a84b43bd0b29005c8f0d71c99d974893c23823b"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.975823 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" event={"ID":"ad2be713-f117-46a7-a491-d75a9564cd48","Type":"ContainerStarted","Data":"164278f15213c61669967579491dbea67e4792fa55f5dd71e8dba37752935a4d"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.977714 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmswh" event={"ID":"7717a36b-65f8-4c9a-b4be-5ab83fe77c99","Type":"ContainerDied","Data":"2e8c15b417bc3cc44a5f5314328ac526d67a90fef70a6026cda9ce6b54fe857b"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.977728 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmswh" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.978608 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" event={"ID":"2e6e49c1-f210-4ee8-af41-9e43123ae910","Type":"ContainerDied","Data":"e916306917abffea8b6ade0b700c82732bdd349be72b442bb22acafa08e0a223"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.978660 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-94zhz" Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.981005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z4f6g" event={"ID":"ee178cda-7bde-4997-8554-e20c3548288b","Type":"ContainerDied","Data":"0d5abc3dea573597f875ab724f2ea29f041583f19f7903b6a43599ddb873c44e"} Nov 26 22:25:34 crc kubenswrapper[4903]: I1126 22:25:34.981080 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z4f6g" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.002996 4903 scope.go:117] "RemoveContainer" containerID="2f32909fce861170978ee0a3b78bd88cca22e3f93ddce0a52ff1ef9eeb108efc" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.009114 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" podStartSLOduration=2.009099388 podStartE2EDuration="2.009099388s" podCreationTimestamp="2025-11-26 22:25:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:25:35.008929593 +0000 UTC m=+263.699164523" watchObservedRunningTime="2025-11-26 22:25:35.009099388 +0000 UTC m=+263.699334298" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.021221 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.028919 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z4f6g"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.036893 4903 scope.go:117] "RemoveContainer" containerID="229320a83cff469bd5f0b9666f9e9a3f3124ce6408764ac43683a140c2b95694" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.038269 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.042048 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fttb4"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.050959 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.053261 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rjqdj"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.063843 4903 scope.go:117] "RemoveContainer" containerID="7eec9fcc99ae3ef8da1cecd5c07021d094c406f8b4d5cf36c20e4e53fc93afc5" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.067265 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.074611 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mmswh"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.081567 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.081942 4903 scope.go:117] "RemoveContainer" containerID="2c4d63f057a54f3947500cae85f3cc393067c72477b0445c0017d9d3ddc0e036" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.083910 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-94zhz"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.098880 4903 scope.go:117] "RemoveContainer" containerID="f0d9f7c8687b30f16665e98ba8cefaff2131441b8bfff5aa979781061912514d" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.118278 4903 scope.go:117] "RemoveContainer" containerID="877d3d598f58d2025041fa714321ea3ca5267f434ffec70b6dd136b698eb0920" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.131594 4903 
scope.go:117] "RemoveContainer" containerID="be74f9276aa959616140769767e3534adb5153e5e32f754e11a2c6e8bf63c604" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.143888 4903 scope.go:117] "RemoveContainer" containerID="dca04f7477e69aedc751edb25b99b381187a4049519212291cc07112fb6de157" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.158360 4903 scope.go:117] "RemoveContainer" containerID="e2850e2da8798f8cf1457ac6b6f84fd5bf2b605bf4671f51ace50c3bd71f6c0f" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.172122 4903 scope.go:117] "RemoveContainer" containerID="8edf5dd0cd0cfba11e4e07cc416fa46448c1c593659d1205939913a4180c5d36" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.184803 4903 scope.go:117] "RemoveContainer" containerID="6f2aa09cfba49dc89f7183a4b5a54238613aba625ff59cdd50967269817001c4" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.202851 4903 scope.go:117] "RemoveContainer" containerID="7043053169eb4f3eb83c7ec4360aea8ca493053e79b6e0d5dabf9cb0ee6ee07b" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.927775 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qldpj"] Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928139 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928149 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928160 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928165 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928174 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928180 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928188 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928194 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928203 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928209 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928217 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928223 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" 
containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928229 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928235 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928240 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928247 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928253 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928258 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928265 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928271 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928279 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928285 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="extract-content" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928291 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928296 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: E1126 22:25:35.928303 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928308 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="extract-utilities" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928399 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee178cda-7bde-4997-8554-e20c3548288b" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928411 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928418 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928428 4903 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" containerName="registry-server" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.928436 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" containerName="marketplace-operator" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.929077 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.930745 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.948469 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qldpj"] Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.991562 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:35 crc kubenswrapper[4903]: I1126 22:25:35.994071 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-f9lv7" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.033810 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e6e49c1-f210-4ee8-af41-9e43123ae910" path="/var/lib/kubelet/pods/2e6e49c1-f210-4ee8-af41-9e43123ae910/volumes" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.034365 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44dc303d-e9c6-4ced-9472-715779cd0dba" path="/var/lib/kubelet/pods/44dc303d-e9c6-4ced-9472-715779cd0dba/volumes" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.034936 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7717a36b-65f8-4c9a-b4be-5ab83fe77c99" path="/var/lib/kubelet/pods/7717a36b-65f8-4c9a-b4be-5ab83fe77c99/volumes" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.036006 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a054a1e6-36d5-4c7c-a520-ee213f7f36fa" path="/var/lib/kubelet/pods/a054a1e6-36d5-4c7c-a520-ee213f7f36fa/volumes" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.036559 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee178cda-7bde-4997-8554-e20c3548288b" path="/var/lib/kubelet/pods/ee178cda-7bde-4997-8554-e20c3548288b/volumes" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.097248 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-catalog-content\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.097287 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-utilities\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.097306 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npgmb\" (UniqueName: 
\"kubernetes.io/projected/f8679d09-9456-47f4-98b9-db03a62c2224-kube-api-access-npgmb\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.135588 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"] Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.136526 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.139106 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.140713 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"] Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.198659 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-catalog-content\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.198703 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npgmb\" (UniqueName: \"kubernetes.io/projected/f8679d09-9456-47f4-98b9-db03a62c2224-kube-api-access-npgmb\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.198722 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-utilities\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.199107 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-utilities\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.199193 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8679d09-9456-47f4-98b9-db03a62c2224-catalog-content\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.221831 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npgmb\" (UniqueName: \"kubernetes.io/projected/f8679d09-9456-47f4-98b9-db03a62c2224-kube-api-access-npgmb\") pod \"redhat-marketplace-qldpj\" (UID: \"f8679d09-9456-47f4-98b9-db03a62c2224\") " pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.260467 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.300338 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.300452 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.300534 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfsv9\" (UniqueName: \"kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.401353 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.401631 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.401672 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfsv9\" (UniqueName: \"kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.401870 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.402099 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities\") pod \"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.424931 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfsv9\" (UniqueName: \"kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9\") pod 
\"certified-operators-qzmvm\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") " pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.458330 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.656950 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qldpj"] Nov 26 22:25:36 crc kubenswrapper[4903]: W1126 22:25:36.665493 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8679d09_9456_47f4_98b9_db03a62c2224.slice/crio-7b978cac0654313010d53d355cca8aa6aff00b9077230caba2271023d2504176 WatchSource:0}: Error finding container 7b978cac0654313010d53d355cca8aa6aff00b9077230caba2271023d2504176: Status 404 returned error can't find the container with id 7b978cac0654313010d53d355cca8aa6aff00b9077230caba2271023d2504176 Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.846640 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"] Nov 26 22:25:36 crc kubenswrapper[4903]: W1126 22:25:36.905134 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a9c3908_6825_461f_894f_f187b429fece.slice/crio-a326e3d960afb74a0a00472fac7036e34a2391ee053f6c863071f5c082e3f98a WatchSource:0}: Error finding container a326e3d960afb74a0a00472fac7036e34a2391ee053f6c863071f5c082e3f98a: Status 404 returned error can't find the container with id a326e3d960afb74a0a00472fac7036e34a2391ee053f6c863071f5c082e3f98a Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.997915 4903 generic.go:334] "Generic (PLEG): container finished" podID="f8679d09-9456-47f4-98b9-db03a62c2224" containerID="2fc4a349cef153eda47349165820c10f73512289fb66fb33140928c080b79490" exitCode=0 Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.997980 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qldpj" event={"ID":"f8679d09-9456-47f4-98b9-db03a62c2224","Type":"ContainerDied","Data":"2fc4a349cef153eda47349165820c10f73512289fb66fb33140928c080b79490"} Nov 26 22:25:36 crc kubenswrapper[4903]: I1126 22:25:36.998005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qldpj" event={"ID":"f8679d09-9456-47f4-98b9-db03a62c2224","Type":"ContainerStarted","Data":"7b978cac0654313010d53d355cca8aa6aff00b9077230caba2271023d2504176"} Nov 26 22:25:37 crc kubenswrapper[4903]: I1126 22:25:37.000214 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerStarted","Data":"a326e3d960afb74a0a00472fac7036e34a2391ee053f6c863071f5c082e3f98a"} Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.009008 4903 generic.go:334] "Generic (PLEG): container finished" podID="3a9c3908-6825-461f-894f-f187b429fece" containerID="4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15" exitCode=0 Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.009117 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerDied","Data":"4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15"} Nov 
26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.042149 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qldpj" event={"ID":"f8679d09-9456-47f4-98b9-db03a62c2224","Type":"ContainerStarted","Data":"454fe8784ecfeb3cbc5f390e858130715755517fd003291c823009f62a98c1ab"} Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.331350 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"] Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.334073 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.339030 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"] Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.340944 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.425086 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2qx\" (UniqueName: \"kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.425632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.425934 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.526636 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rtxgl"] Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.528426 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.531244 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2qx\" (UniqueName: \"kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.531569 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.531872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.532993 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.533743 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.534014 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.540763 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtxgl"] Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.558040 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc2qx\" (UniqueName: \"kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx\") pod \"redhat-operators-khkfx\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.633668 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.633991 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w9kl\" (UniqueName: \"kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") 
" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.634017 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.669130 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.735377 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.735423 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w9kl\" (UniqueName: \"kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.735447 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.735862 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.735893 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.751845 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w9kl\" (UniqueName: \"kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl\") pod \"community-operators-rtxgl\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") " pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.856988 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"] Nov 26 22:25:38 crc kubenswrapper[4903]: I1126 22:25:38.916726 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.035518 4903 generic.go:334] "Generic (PLEG): container finished" podID="f8679d09-9456-47f4-98b9-db03a62c2224" containerID="454fe8784ecfeb3cbc5f390e858130715755517fd003291c823009f62a98c1ab" exitCode=0 Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.035589 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qldpj" event={"ID":"f8679d09-9456-47f4-98b9-db03a62c2224","Type":"ContainerDied","Data":"454fe8784ecfeb3cbc5f390e858130715755517fd003291c823009f62a98c1ab"} Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.035633 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qldpj" event={"ID":"f8679d09-9456-47f4-98b9-db03a62c2224","Type":"ContainerStarted","Data":"3ce3be4cb41de0b249d3874e95420b737f6e35d248f08b1dbae4bfe9435d756a"} Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.039490 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerStarted","Data":"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"} Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.041157 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerStarted","Data":"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"} Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.041200 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerStarted","Data":"4bb5e7936357f86fc57337a502610c13f2ef06b561044dd59e1880f7ad5f47a9"} Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.052591 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qldpj" podStartSLOduration=2.596844791 podStartE2EDuration="4.05257419s" podCreationTimestamp="2025-11-26 22:25:35 +0000 UTC" firstStartedPulling="2025-11-26 22:25:37.000760437 +0000 UTC m=+265.690995357" lastFinishedPulling="2025-11-26 22:25:38.456489806 +0000 UTC m=+267.146724756" observedRunningTime="2025-11-26 22:25:39.050172445 +0000 UTC m=+267.740407355" watchObservedRunningTime="2025-11-26 22:25:39.05257419 +0000 UTC m=+267.742809100" Nov 26 22:25:39 crc kubenswrapper[4903]: I1126 22:25:39.406943 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtxgl"] Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.046853 4903 generic.go:334] "Generic (PLEG): container finished" podID="3a9c3908-6825-461f-894f-f187b429fece" containerID="4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf" exitCode=0 Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.046900 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerDied","Data":"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"} Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.050520 4903 generic.go:334] "Generic (PLEG): container finished" podID="ca8619ad-8673-4b83-907d-e274c4cd11ac" 
containerID="f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf" exitCode=0 Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.050597 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerDied","Data":"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"} Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.050649 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerStarted","Data":"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"} Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.052004 4903 generic.go:334] "Generic (PLEG): container finished" podID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerID="31c91ceaf7db193abff6bcd618c2f2219be6524ef3f5d7961f79df02de5ee435" exitCode=0 Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.052087 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerDied","Data":"31c91ceaf7db193abff6bcd618c2f2219be6524ef3f5d7961f79df02de5ee435"} Nov 26 22:25:40 crc kubenswrapper[4903]: I1126 22:25:40.052124 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerStarted","Data":"6b66eb36e331ccb3542168d00c757edbd10f2abe1098313bb2b034d50236f25f"} Nov 26 22:25:41 crc kubenswrapper[4903]: I1126 22:25:41.066008 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerStarted","Data":"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"} Nov 26 22:25:41 crc kubenswrapper[4903]: I1126 22:25:41.067321 4903 generic.go:334] "Generic (PLEG): container finished" podID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerID="1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910" exitCode=0 Nov 26 22:25:41 crc kubenswrapper[4903]: I1126 22:25:41.067360 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerDied","Data":"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"} Nov 26 22:25:41 crc kubenswrapper[4903]: I1126 22:25:41.071287 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerStarted","Data":"a4d462645d2bf479274590d249aaca46da87b30d133681033db52dd3371dcf00"} Nov 26 22:25:41 crc kubenswrapper[4903]: I1126 22:25:41.086409 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qzmvm" podStartSLOduration=2.589003379 podStartE2EDuration="5.086393272s" podCreationTimestamp="2025-11-26 22:25:36 +0000 UTC" firstStartedPulling="2025-11-26 22:25:38.010769813 +0000 UTC m=+266.701004723" lastFinishedPulling="2025-11-26 22:25:40.508159706 +0000 UTC m=+269.198394616" observedRunningTime="2025-11-26 22:25:41.085557799 +0000 UTC m=+269.775792709" watchObservedRunningTime="2025-11-26 22:25:41.086393272 +0000 UTC m=+269.776628182" Nov 26 22:25:42 crc kubenswrapper[4903]: I1126 22:25:42.078214 4903 
generic.go:334] "Generic (PLEG): container finished" podID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerID="a4d462645d2bf479274590d249aaca46da87b30d133681033db52dd3371dcf00" exitCode=0 Nov 26 22:25:42 crc kubenswrapper[4903]: I1126 22:25:42.078533 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerDied","Data":"a4d462645d2bf479274590d249aaca46da87b30d133681033db52dd3371dcf00"} Nov 26 22:25:42 crc kubenswrapper[4903]: I1126 22:25:42.085923 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerStarted","Data":"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"} Nov 26 22:25:42 crc kubenswrapper[4903]: I1126 22:25:42.109146 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-khkfx" podStartSLOduration=1.5809710030000002 podStartE2EDuration="4.109121377s" podCreationTimestamp="2025-11-26 22:25:38 +0000 UTC" firstStartedPulling="2025-11-26 22:25:39.042288239 +0000 UTC m=+267.732523139" lastFinishedPulling="2025-11-26 22:25:41.570438603 +0000 UTC m=+270.260673513" observedRunningTime="2025-11-26 22:25:42.104688016 +0000 UTC m=+270.794922936" watchObservedRunningTime="2025-11-26 22:25:42.109121377 +0000 UTC m=+270.799356297" Nov 26 22:25:43 crc kubenswrapper[4903]: I1126 22:25:43.092721 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerStarted","Data":"bb293a9a89edfa818b7c4fcd5402e6e1f797f5bfe9cfc0d9ea5acbffbbdd91ad"} Nov 26 22:25:43 crc kubenswrapper[4903]: I1126 22:25:43.114518 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rtxgl" podStartSLOduration=2.668649275 podStartE2EDuration="5.114500917s" podCreationTimestamp="2025-11-26 22:25:38 +0000 UTC" firstStartedPulling="2025-11-26 22:25:40.054585849 +0000 UTC m=+268.744820769" lastFinishedPulling="2025-11-26 22:25:42.500437491 +0000 UTC m=+271.190672411" observedRunningTime="2025-11-26 22:25:43.114396674 +0000 UTC m=+271.804631584" watchObservedRunningTime="2025-11-26 22:25:43.114500917 +0000 UTC m=+271.804735837" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.261458 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.262565 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.322915 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.458954 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.458991 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:46 crc kubenswrapper[4903]: I1126 22:25:46.497056 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:47 crc kubenswrapper[4903]: I1126 22:25:47.150022 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qldpj" Nov 26 22:25:47 crc kubenswrapper[4903]: I1126 22:25:47.188342 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qzmvm" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.670122 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.670386 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.739074 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.918138 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.918394 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:48 crc kubenswrapper[4903]: I1126 22:25:48.975415 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:49 crc kubenswrapper[4903]: I1126 22:25:49.161348 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rtxgl" Nov 26 22:25:49 crc kubenswrapper[4903]: I1126 22:25:49.189121 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-khkfx" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.022632 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt"] Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.024220 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.027632 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.027918 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.028049 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.029601 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.030516 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.057418 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt"] Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.168182 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/ac22707d-cdf8-45d9-b377-34f9bde78c80-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.168279 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/ac22707d-cdf8-45d9-b377-34f9bde78c80-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.168828 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8xpv\" (UniqueName: \"kubernetes.io/projected/ac22707d-cdf8-45d9-b377-34f9bde78c80-kube-api-access-n8xpv\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.270405 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8xpv\" (UniqueName: \"kubernetes.io/projected/ac22707d-cdf8-45d9-b377-34f9bde78c80-kube-api-access-n8xpv\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.270514 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/ac22707d-cdf8-45d9-b377-34f9bde78c80-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 
22:26:04.270555 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/ac22707d-cdf8-45d9-b377-34f9bde78c80-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.272756 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/ac22707d-cdf8-45d9-b377-34f9bde78c80-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.282000 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/ac22707d-cdf8-45d9-b377-34f9bde78c80-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.306585 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8xpv\" (UniqueName: \"kubernetes.io/projected/ac22707d-cdf8-45d9-b377-34f9bde78c80-kube-api-access-n8xpv\") pod \"cluster-monitoring-operator-6d5b84845-h4fkt\" (UID: \"ac22707d-cdf8-45d9-b377-34f9bde78c80\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.359213 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" Nov 26 22:26:04 crc kubenswrapper[4903]: I1126 22:26:04.611662 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt"] Nov 26 22:26:04 crc kubenswrapper[4903]: W1126 22:26:04.619924 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac22707d_cdf8_45d9_b377_34f9bde78c80.slice/crio-8926f4791360d6393f3ac41e1fbe11157aac6457616eb2c2bf5d9bf55101b875 WatchSource:0}: Error finding container 8926f4791360d6393f3ac41e1fbe11157aac6457616eb2c2bf5d9bf55101b875: Status 404 returned error can't find the container with id 8926f4791360d6393f3ac41e1fbe11157aac6457616eb2c2bf5d9bf55101b875 Nov 26 22:26:05 crc kubenswrapper[4903]: I1126 22:26:05.206135 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" event={"ID":"ac22707d-cdf8-45d9-b377-34f9bde78c80","Type":"ContainerStarted","Data":"8926f4791360d6393f3ac41e1fbe11157aac6457616eb2c2bf5d9bf55101b875"} Nov 26 22:26:07 crc kubenswrapper[4903]: I1126 22:26:07.926160 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9g4x"] Nov 26 22:26:07 crc kubenswrapper[4903]: I1126 22:26:07.927467 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:07 crc kubenswrapper[4903]: I1126 22:26:07.945820 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9g4x"] Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.031942 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032006 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032027 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ssxl\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-kube-api-access-5ssxl\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032051 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-certificates\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032079 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-trusted-ca\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032125 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032150 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-tls\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.032184 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-bound-sa-token\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.046929 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh"] Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.047511 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.049432 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-kw2jj" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.049987 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.063989 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh"] Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.088040 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133683 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-trusted-ca\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133770 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/353697f8-aecd-4abf-b7a4-5f0f2f536ca9-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-qhkfh\" (UID: \"353697f8-aecd-4abf-b7a4-5f0f2f536ca9\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133808 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-tls\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133843 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-bound-sa-token\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133883 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133922 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133937 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ssxl\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-kube-api-access-5ssxl\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.133960 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-certificates\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.135296 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.137300 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-trusted-ca\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.138850 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-certificates\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.140170 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-registry-tls\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.140683 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.152310 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ssxl\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-kube-api-access-5ssxl\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.153982 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1d3ead7e-7e01-4ffe-8155-116cad10e5e6-bound-sa-token\") pod \"image-registry-66df7c8f76-l9g4x\" (UID: \"1d3ead7e-7e01-4ffe-8155-116cad10e5e6\") " pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.223755 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" event={"ID":"ac22707d-cdf8-45d9-b377-34f9bde78c80","Type":"ContainerStarted","Data":"4fa684b57dc9f1c5606f6cdc8e722cb0d138318029f22a4b6efacc2bfae94e4b"} Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.234804 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/353697f8-aecd-4abf-b7a4-5f0f2f536ca9-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-qhkfh\" (UID: \"353697f8-aecd-4abf-b7a4-5f0f2f536ca9\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.237179 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-h4fkt" podStartSLOduration=2.544365817 podStartE2EDuration="5.237162753s" podCreationTimestamp="2025-11-26 22:26:03 +0000 UTC" firstStartedPulling="2025-11-26 22:26:04.622522372 +0000 UTC m=+293.312757302" lastFinishedPulling="2025-11-26 22:26:07.315319298 +0000 UTC m=+296.005554238" observedRunningTime="2025-11-26 22:26:08.234540461 +0000 UTC m=+296.924775371" watchObservedRunningTime="2025-11-26 22:26:08.237162753 +0000 UTC m=+296.927397663" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.239055 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/353697f8-aecd-4abf-b7a4-5f0f2f536ca9-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-qhkfh\" (UID: \"353697f8-aecd-4abf-b7a4-5f0f2f536ca9\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.247931 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.359401 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.555211 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh"] Nov 26 22:26:08 crc kubenswrapper[4903]: W1126 22:26:08.563402 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod353697f8_aecd_4abf_b7a4_5f0f2f536ca9.slice/crio-3a0723b16e65c39502e85eebd2758fdb8c353b19ed48d8f376b87846ebd9a085 WatchSource:0}: Error finding container 3a0723b16e65c39502e85eebd2758fdb8c353b19ed48d8f376b87846ebd9a085: Status 404 returned error can't find the container with id 3a0723b16e65c39502e85eebd2758fdb8c353b19ed48d8f376b87846ebd9a085 Nov 26 22:26:08 crc kubenswrapper[4903]: I1126 22:26:08.687998 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-l9g4x"] Nov 26 22:26:09 crc kubenswrapper[4903]: I1126 22:26:09.232775 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" event={"ID":"353697f8-aecd-4abf-b7a4-5f0f2f536ca9","Type":"ContainerStarted","Data":"3a0723b16e65c39502e85eebd2758fdb8c353b19ed48d8f376b87846ebd9a085"} Nov 26 22:26:09 crc kubenswrapper[4903]: I1126 22:26:09.235199 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" event={"ID":"1d3ead7e-7e01-4ffe-8155-116cad10e5e6","Type":"ContainerStarted","Data":"0ac8ee4667d236eff27c005d7ff87ecbd9fcd0ab187782cdc063a86299b9ce94"} Nov 26 22:26:09 crc kubenswrapper[4903]: I1126 22:26:09.235266 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" event={"ID":"1d3ead7e-7e01-4ffe-8155-116cad10e5e6","Type":"ContainerStarted","Data":"3d4f8a44b2c374b187e5485cebcbdbc7091960a875d386beb9c13b4a957f9c47"} Nov 26 22:26:09 crc kubenswrapper[4903]: I1126 22:26:09.235837 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:09 crc kubenswrapper[4903]: I1126 22:26:09.264075 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" podStartSLOduration=2.264057643 podStartE2EDuration="2.264057643s" podCreationTimestamp="2025-11-26 22:26:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:26:09.262319244 +0000 UTC m=+297.952554184" watchObservedRunningTime="2025-11-26 22:26:09.264057643 +0000 UTC m=+297.954292553" Nov 26 22:26:11 crc kubenswrapper[4903]: I1126 22:26:11.249385 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" event={"ID":"353697f8-aecd-4abf-b7a4-5f0f2f536ca9","Type":"ContainerStarted","Data":"da0a57134a4b0e00381683e61b0dbe25450f20bb2f86cdd0f2a3115654bb5bc3"} Nov 26 22:26:11 crc kubenswrapper[4903]: I1126 22:26:11.250167 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:11 crc kubenswrapper[4903]: I1126 22:26:11.259552 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" Nov 26 22:26:11 crc kubenswrapper[4903]: I1126 22:26:11.277134 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-qhkfh" podStartSLOduration=1.347448503 podStartE2EDuration="3.277107416s" podCreationTimestamp="2025-11-26 22:26:08 +0000 UTC" firstStartedPulling="2025-11-26 22:26:08.566157112 +0000 UTC m=+297.256392022" lastFinishedPulling="2025-11-26 22:26:10.495815985 +0000 UTC m=+299.186050935" observedRunningTime="2025-11-26 22:26:11.270370772 +0000 UTC m=+299.960605712" watchObservedRunningTime="2025-11-26 22:26:11.277107416 +0000 UTC m=+299.967342356" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.117133 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zbj6w"] Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.118764 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.124028 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.124276 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.124920 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-4ncq2" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.132145 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.143828 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zbj6w"] Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.200074 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhbs9\" (UniqueName: \"kubernetes.io/projected/b4adff3b-c737-4a13-a799-489d5a23a7b1-kube-api-access-lhbs9\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.200153 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.200189 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.200215 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b4adff3b-c737-4a13-a799-489d5a23a7b1-metrics-client-ca\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.300844 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhbs9\" (UniqueName: \"kubernetes.io/projected/b4adff3b-c737-4a13-a799-489d5a23a7b1-kube-api-access-lhbs9\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.300923 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.300968 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.300995 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b4adff3b-c737-4a13-a799-489d5a23a7b1-metrics-client-ca\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: E1126 22:26:12.301180 4903 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-tls: secret "prometheus-operator-tls" not found Nov 26 22:26:12 crc kubenswrapper[4903]: E1126 22:26:12.301316 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls podName:b4adff3b-c737-4a13-a799-489d5a23a7b1 nodeName:}" failed. No retries permitted until 2025-11-26 22:26:12.801287291 +0000 UTC m=+301.491522231 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "prometheus-operator-tls" (UniqueName: "kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls") pod "prometheus-operator-db54df47d-zbj6w" (UID: "b4adff3b-c737-4a13-a799-489d5a23a7b1") : secret "prometheus-operator-tls" not found Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.301801 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b4adff3b-c737-4a13-a799-489d5a23a7b1-metrics-client-ca\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.318668 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.319459 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhbs9\" (UniqueName: \"kubernetes.io/projected/b4adff3b-c737-4a13-a799-489d5a23a7b1-kube-api-access-lhbs9\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.808138 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:12 crc kubenswrapper[4903]: I1126 22:26:12.819480 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b4adff3b-c737-4a13-a799-489d5a23a7b1-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-zbj6w\" (UID: \"b4adff3b-c737-4a13-a799-489d5a23a7b1\") " pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:13 crc kubenswrapper[4903]: I1126 22:26:13.074291 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" Nov 26 22:26:13 crc kubenswrapper[4903]: I1126 22:26:13.345528 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-zbj6w"] Nov 26 22:26:14 crc kubenswrapper[4903]: I1126 22:26:14.271932 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" event={"ID":"b4adff3b-c737-4a13-a799-489d5a23a7b1","Type":"ContainerStarted","Data":"85107be01e349358979b4f9f71a1740161faa13d50864e6a4378d59d24ece6c6"} Nov 26 22:26:16 crc kubenswrapper[4903]: I1126 22:26:16.292444 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" event={"ID":"b4adff3b-c737-4a13-a799-489d5a23a7b1","Type":"ContainerStarted","Data":"3746ddee1e04543773eb307328cf15b7058906a71d6b30c32bbfd052d266b3f2"} Nov 26 22:26:16 crc kubenswrapper[4903]: I1126 22:26:16.293041 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" event={"ID":"b4adff3b-c737-4a13-a799-489d5a23a7b1","Type":"ContainerStarted","Data":"975e36c4f719fd35c124275095e609cafb66773ff96d9254f35e5a09fddc78c6"} Nov 26 22:26:16 crc kubenswrapper[4903]: I1126 22:26:16.329802 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-zbj6w" podStartSLOduration=1.972908045 podStartE2EDuration="4.329765453s" podCreationTimestamp="2025-11-26 22:26:12 +0000 UTC" firstStartedPulling="2025-11-26 22:26:13.357355947 +0000 UTC m=+302.047590877" lastFinishedPulling="2025-11-26 22:26:15.714213345 +0000 UTC m=+304.404448285" observedRunningTime="2025-11-26 22:26:16.318750291 +0000 UTC m=+305.008985231" watchObservedRunningTime="2025-11-26 22:26:16.329765453 +0000 UTC m=+305.020000413" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.451507 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2"] Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.453233 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.461933 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.461971 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-n5rgp" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.466459 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.466603 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-4mrts"] Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.468543 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.470328 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-7mft4" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.472089 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.472697 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.481103 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2"] Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.503194 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.503502 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/34d1ffb1-678f-42ed-8442-697521b88441-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.503665 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.503835 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwz5g\" (UniqueName: \"kubernetes.io/projected/34d1ffb1-678f-42ed-8442-697521b88441-kube-api-access-jwz5g\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.511455 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb"] Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.512401 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.515809 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.515816 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.516157 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-j85km" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.517046 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.527931 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb"] Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605206 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-sys\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605508 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-textfile\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605633 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605764 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605888 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-root\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.605999 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-tls\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " 
pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606108 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606211 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d7150-5812-422d-b511-657f7c2465a5-metrics-client-ca\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606321 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sbzx\" (UniqueName: \"kubernetes.io/projected/fe6d7150-5812-422d-b511-657f7c2465a5-kube-api-access-5sbzx\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbjqj\" (UniqueName: \"kubernetes.io/projected/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-api-access-lbjqj\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606539 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-wtmp\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: E1126 22:26:18.606249 4903 secret.go:188] Couldn't get secret openshift-monitoring/openshift-state-metrics-tls: secret "openshift-state-metrics-tls" not found Nov 26 22:26:18 crc kubenswrapper[4903]: E1126 22:26:18.606828 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls podName:34d1ffb1-678f-42ed-8442-697521b88441 nodeName:}" failed. No retries permitted until 2025-11-26 22:26:19.106808577 +0000 UTC m=+307.797043487 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "openshift-state-metrics-tls" (UniqueName: "kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls") pod "openshift-state-metrics-566fddb674-l9nx2" (UID: "34d1ffb1-678f-42ed-8442-697521b88441") : secret "openshift-state-metrics-tls" not found Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.606951 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.607076 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwz5g\" (UniqueName: \"kubernetes.io/projected/34d1ffb1-678f-42ed-8442-697521b88441-kube-api-access-jwz5g\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.607190 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.607334 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.607464 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/34d1ffb1-678f-42ed-8442-697521b88441-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.607579 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.608255 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/34d1ffb1-678f-42ed-8442-697521b88441-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.612682 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.625529 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwz5g\" (UniqueName: \"kubernetes.io/projected/34d1ffb1-678f-42ed-8442-697521b88441-kube-api-access-jwz5g\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708593 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbjqj\" (UniqueName: \"kubernetes.io/projected/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-api-access-lbjqj\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708636 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-wtmp\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708658 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708678 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708728 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708764 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"sys\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-sys\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708806 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-textfile\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708824 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708840 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708855 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-root\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708869 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-tls\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708893 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d7150-5812-422d-b511-657f7c2465a5-metrics-client-ca\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.708914 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sbzx\" (UniqueName: \"kubernetes.io/projected/fe6d7150-5812-422d-b511-657f7c2465a5-kube-api-access-5sbzx\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.709188 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-sys\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.709468 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" 
(UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-root\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.709541 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-textfile\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.709732 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-wtmp\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.710457 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d7150-5812-422d-b511-657f7c2465a5-metrics-client-ca\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.712473 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.712838 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-tls\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.713277 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.714002 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.715093 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.723426 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.724398 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/fe6d7150-5812-422d-b511-657f7c2465a5-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.727064 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbjqj\" (UniqueName: \"kubernetes.io/projected/b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8-kube-api-access-lbjqj\") pod \"kube-state-metrics-777cb5bd5d-nnczb\" (UID: \"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.727218 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sbzx\" (UniqueName: \"kubernetes.io/projected/fe6d7150-5812-422d-b511-657f7c2465a5-kube-api-access-5sbzx\") pod \"node-exporter-4mrts\" (UID: \"fe6d7150-5812-422d-b511-657f7c2465a5\") " pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.778918 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-4mrts" Nov 26 22:26:18 crc kubenswrapper[4903]: W1126 22:26:18.800789 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe6d7150_5812_422d_b511_657f7c2465a5.slice/crio-26f2a52feabfab75fd7b338a1ff0d72b2b1899599a0b3bb04a83c1506172e76a WatchSource:0}: Error finding container 26f2a52feabfab75fd7b338a1ff0d72b2b1899599a0b3bb04a83c1506172e76a: Status 404 returned error can't find the container with id 26f2a52feabfab75fd7b338a1ff0d72b2b1899599a0b3bb04a83c1506172e76a Nov 26 22:26:18 crc kubenswrapper[4903]: I1126 22:26:18.822301 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.003596 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb"] Nov 26 22:26:19 crc kubenswrapper[4903]: W1126 22:26:19.008584 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1d331fa_ca9b_4f3c_88a5_f017b75bdcf8.slice/crio-7e836f340806edc112d58d6358cb13769f3f57b309cd0e593d95a62e1afd6af9 WatchSource:0}: Error finding container 7e836f340806edc112d58d6358cb13769f3f57b309cd0e593d95a62e1afd6af9: Status 404 returned error can't find the container with id 7e836f340806edc112d58d6358cb13769f3f57b309cd0e593d95a62e1afd6af9 Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.113932 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.118259 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/34d1ffb1-678f-42ed-8442-697521b88441-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-l9nx2\" (UID: \"34d1ffb1-678f-42ed-8442-697521b88441\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.312386 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" event={"ID":"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8","Type":"ContainerStarted","Data":"7e836f340806edc112d58d6358cb13769f3f57b309cd0e593d95a62e1afd6af9"} Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.313744 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-4mrts" event={"ID":"fe6d7150-5812-422d-b511-657f7c2465a5","Type":"ContainerStarted","Data":"26f2a52feabfab75fd7b338a1ff0d72b2b1899599a0b3bb04a83c1506172e76a"} Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.366181 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.592344 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.595335 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.598268 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.598504 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.598650 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.598809 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.598938 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.599082 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.599614 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.599536 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-xxdkf" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.604593 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.619447 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.641050 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2"] Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.724897 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.724950 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.724981 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-tls-assets\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725025 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725047 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-volume\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725063 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-out\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725109 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725547 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725598 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725620 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-web-config\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725734 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.725796 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml2sj\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-kube-api-access-ml2sj\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " 
pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827169 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827213 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-volume\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827243 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-out\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827283 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827325 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827346 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827365 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-web-config\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827396 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827413 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml2sj\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-kube-api-access-ml2sj\") pod \"alertmanager-main-0\" (UID: 
\"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827431 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827449 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.827467 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-tls-assets\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.828827 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.829057 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.830159 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cab70503-fab1-47f5-86d6-c6181d0fc1a3-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.835475 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-volume\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.836604 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/cab70503-fab1-47f5-86d6-c6181d0fc1a3-config-out\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.837393 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-web-config\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " 
pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.839365 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.839817 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.840850 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.851510 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-tls-assets\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.851917 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/cab70503-fab1-47f5-86d6-c6181d0fc1a3-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.853485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml2sj\" (UniqueName: \"kubernetes.io/projected/cab70503-fab1-47f5-86d6-c6181d0fc1a3-kube-api-access-ml2sj\") pod \"alertmanager-main-0\" (UID: \"cab70503-fab1-47f5-86d6-c6181d0fc1a3\") " pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:19 crc kubenswrapper[4903]: I1126 22:26:19.916646 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.216252 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 26 22:26:20 crc kubenswrapper[4903]: W1126 22:26:20.224019 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcab70503_fab1_47f5_86d6_c6181d0fc1a3.slice/crio-fc3510e5a2a8a81491025bd87282815eb5d1068b732856f031626e576c35bbad WatchSource:0}: Error finding container fc3510e5a2a8a81491025bd87282815eb5d1068b732856f031626e576c35bbad: Status 404 returned error can't find the container with id fc3510e5a2a8a81491025bd87282815eb5d1068b732856f031626e576c35bbad Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.320680 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" event={"ID":"34d1ffb1-678f-42ed-8442-697521b88441","Type":"ContainerStarted","Data":"5181645045d4459e92a44f785dba7f5876f0c4fb91836209765435960a1578e9"} Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.320729 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" event={"ID":"34d1ffb1-678f-42ed-8442-697521b88441","Type":"ContainerStarted","Data":"2423beae5ab88afec0f506c636e79dc766a21c994994b98c385fc65b03da5a82"} Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.320741 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" event={"ID":"34d1ffb1-678f-42ed-8442-697521b88441","Type":"ContainerStarted","Data":"db799633f7f96a69d20150462a7190365457daa688d3a3fb76e42187f000c652"} Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.321650 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"fc3510e5a2a8a81491025bd87282815eb5d1068b732856f031626e576c35bbad"} Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.322752 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-4mrts" event={"ID":"fe6d7150-5812-422d-b511-657f7c2465a5","Type":"ContainerStarted","Data":"ca0334e51aabe499a1ac30a784a78b75dd09e0ea2f767294a1ae003a275d69c8"} Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.580993 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-6c49876-bwx7j"] Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.583002 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.585827 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.586048 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.586148 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.586386 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-agrneh23ptocm" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.586810 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.586877 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.587792 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-w7mr5" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.598119 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6c49876-bwx7j"] Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638432 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638481 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638522 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638577 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/224db6b5-404e-4a5f-be90-342998388a71-metrics-client-ca\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638670 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638830 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpzhm\" (UniqueName: \"kubernetes.io/projected/224db6b5-404e-4a5f-be90-342998388a71-kube-api-access-tpzhm\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638884 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.638991 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-grpc-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740445 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740527 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740617 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740659 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/224db6b5-404e-4a5f-be90-342998388a71-metrics-client-ca\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740747 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" 
(UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740811 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpzhm\" (UniqueName: \"kubernetes.io/projected/224db6b5-404e-4a5f-be90-342998388a71-kube-api-access-tpzhm\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740851 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.740900 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-grpc-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.743354 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/224db6b5-404e-4a5f-be90-342998388a71-metrics-client-ca\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.747423 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.747743 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.750870 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.751662 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-grpc-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: 
\"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.752464 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.759359 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/224db6b5-404e-4a5f-be90-342998388a71-secret-thanos-querier-tls\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.772376 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpzhm\" (UniqueName: \"kubernetes.io/projected/224db6b5-404e-4a5f-be90-342998388a71-kube-api-access-tpzhm\") pod \"thanos-querier-6c49876-bwx7j\" (UID: \"224db6b5-404e-4a5f-be90-342998388a71\") " pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:20 crc kubenswrapper[4903]: I1126 22:26:20.897005 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:21 crc kubenswrapper[4903]: I1126 22:26:21.339857 4903 generic.go:334] "Generic (PLEG): container finished" podID="fe6d7150-5812-422d-b511-657f7c2465a5" containerID="ca0334e51aabe499a1ac30a784a78b75dd09e0ea2f767294a1ae003a275d69c8" exitCode=0 Nov 26 22:26:21 crc kubenswrapper[4903]: I1126 22:26:21.340856 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-4mrts" event={"ID":"fe6d7150-5812-422d-b511-657f7c2465a5","Type":"ContainerDied","Data":"ca0334e51aabe499a1ac30a784a78b75dd09e0ea2f767294a1ae003a275d69c8"} Nov 26 22:26:21 crc kubenswrapper[4903]: I1126 22:26:21.377270 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-6c49876-bwx7j"] Nov 26 22:26:21 crc kubenswrapper[4903]: W1126 22:26:21.389094 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod224db6b5_404e_4a5f_be90_342998388a71.slice/crio-d32a56f4641c133b4a3c324166975d77be56962a72f7fcc5ae9178cb4e8efd60 WatchSource:0}: Error finding container d32a56f4641c133b4a3c324166975d77be56962a72f7fcc5ae9178cb4e8efd60: Status 404 returned error can't find the container with id d32a56f4641c133b4a3c324166975d77be56962a72f7fcc5ae9178cb4e8efd60 Nov 26 22:26:22 crc kubenswrapper[4903]: I1126 22:26:22.347418 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-4mrts" event={"ID":"fe6d7150-5812-422d-b511-657f7c2465a5","Type":"ContainerStarted","Data":"16a9130ee7d5ddb782a325e4a853dd00289e17ce701d831e57fcb8f0cdafec07"} Nov 26 22:26:22 crc kubenswrapper[4903]: I1126 22:26:22.347507 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-4mrts" event={"ID":"fe6d7150-5812-422d-b511-657f7c2465a5","Type":"ContainerStarted","Data":"a943588740a7b495e4768c680e0556a4fd800ace0a32d4207a68f806a2658cf7"} Nov 26 22:26:22 crc kubenswrapper[4903]: I1126 22:26:22.349512 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"d32a56f4641c133b4a3c324166975d77be56962a72f7fcc5ae9178cb4e8efd60"} Nov 26 22:26:22 crc kubenswrapper[4903]: I1126 22:26:22.352485 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" event={"ID":"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8","Type":"ContainerStarted","Data":"f5084c9222c11b76422d041b1dc105f79e1c2d4a6318ec482b6a3d6dee96ee7a"} Nov 26 22:26:22 crc kubenswrapper[4903]: I1126 22:26:22.368540 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-4mrts" podStartSLOduration=3.065314446 podStartE2EDuration="4.368512923s" podCreationTimestamp="2025-11-26 22:26:18 +0000 UTC" firstStartedPulling="2025-11-26 22:26:18.802749637 +0000 UTC m=+307.492984557" lastFinishedPulling="2025-11-26 22:26:20.105948114 +0000 UTC m=+308.796183034" observedRunningTime="2025-11-26 22:26:22.368087581 +0000 UTC m=+311.058322491" watchObservedRunningTime="2025-11-26 22:26:22.368512923 +0000 UTC m=+311.058747833" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.316553 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.317463 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.327135 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.360435 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"1e324e2824ec6915b8439d2c61511517258c3ca50db341ad015c3d253a8cf367"} Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.362914 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" event={"ID":"34d1ffb1-678f-42ed-8442-697521b88441","Type":"ContainerStarted","Data":"5e1e7fcf7a8d1b07bca570bfbb72e684c612f0a22586e5829ae542ec59c40a22"} Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.366981 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" event={"ID":"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8","Type":"ContainerStarted","Data":"3f94d482e5262b7d94e43c48809c503485c24f4ea1c53b6a7473fedbd81ac1cd"} Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.367032 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" event={"ID":"b1d331fa-ca9b-4f3c-88a5-f017b75bdcf8","Type":"ContainerStarted","Data":"cfd41c1d4c6e30dc6cb0ec81d03ea1ba31ce8e08daa024acbcfeb65bdb846c77"} Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387591 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bzzb\" (UniqueName: \"kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387631 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387664 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387739 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387780 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387814 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.387833 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.412232 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-nnczb" podStartSLOduration=2.326382433 podStartE2EDuration="5.412211911s" podCreationTimestamp="2025-11-26 22:26:18 +0000 UTC" firstStartedPulling="2025-11-26 22:26:19.010033907 +0000 UTC m=+307.700268817" lastFinishedPulling="2025-11-26 22:26:22.095863385 +0000 UTC m=+310.786098295" observedRunningTime="2025-11-26 22:26:23.407529693 +0000 UTC m=+312.097764603" watchObservedRunningTime="2025-11-26 22:26:23.412211911 +0000 UTC m=+312.102446821" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.427677 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-l9nx2" podStartSLOduration=2.179262449 podStartE2EDuration="5.427658704s" podCreationTimestamp="2025-11-26 22:26:18 +0000 UTC" firstStartedPulling="2025-11-26 22:26:19.891007015 +0000 UTC m=+308.581241935" lastFinishedPulling="2025-11-26 22:26:23.13940325 +0000 UTC m=+311.829638190" 
observedRunningTime="2025-11-26 22:26:23.42678773 +0000 UTC m=+312.117022640" watchObservedRunningTime="2025-11-26 22:26:23.427658704 +0000 UTC m=+312.117893614" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489213 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489316 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489372 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489403 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489517 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bzzb\" (UniqueName: \"kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489535 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.489604 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.490428 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.490878 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca\") pod 
\"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.491044 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.491994 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.498313 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.498786 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.507333 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bzzb\" (UniqueName: \"kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb\") pod \"console-869cd9b9c9-pnxtx\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.633779 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.770026 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-6d8f7b4658-vq858"] Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.771888 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780313 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780440 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780634 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-gc2r8" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780684 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780823 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.780918 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-93ubnjffr43m1" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.786154 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-6d8f7b4658-vq858"] Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.894943 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-server-tls\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895014 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj6rc\" (UniqueName: \"kubernetes.io/projected/c2d272dc-c249-4079-acc3-c218cf167a1e-kube-api-access-bj6rc\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895047 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-metrics-server-audit-profiles\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895078 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-client-certs\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " 
pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895501 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c2d272dc-c249-4079-acc3-c218cf167a1e-audit-log\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.895532 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-client-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996642 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996706 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c2d272dc-c249-4079-acc3-c218cf167a1e-audit-log\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996725 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-client-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996759 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-server-tls\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996789 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj6rc\" (UniqueName: \"kubernetes.io/projected/c2d272dc-c249-4079-acc3-c218cf167a1e-kube-api-access-bj6rc\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996810 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-metrics-server-audit-profiles\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.996832 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-client-certs\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.997888 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/c2d272dc-c249-4079-acc3-c218cf167a1e-audit-log\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.999170 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:23 crc kubenswrapper[4903]: I1126 22:26:23.999851 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/c2d272dc-c249-4079-acc3-c218cf167a1e-metrics-server-audit-profiles\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.002802 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-server-tls\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.003031 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-client-ca-bundle\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.009640 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/c2d272dc-c249-4079-acc3-c218cf167a1e-secret-metrics-client-certs\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.012338 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj6rc\" (UniqueName: \"kubernetes.io/projected/c2d272dc-c249-4079-acc3-c218cf167a1e-kube-api-access-bj6rc\") pod \"metrics-server-6d8f7b4658-vq858\" (UID: \"c2d272dc-c249-4079-acc3-c218cf167a1e\") " pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.099883 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.120585 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.268235 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd"] Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.269170 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.271989 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.272381 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.276161 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd"] Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.378209 4903 generic.go:334] "Generic (PLEG): container finished" podID="cab70503-fab1-47f5-86d6-c6181d0fc1a3" containerID="1e324e2824ec6915b8439d2c61511517258c3ca50db341ad015c3d253a8cf367" exitCode=0 Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.378321 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerDied","Data":"1e324e2824ec6915b8439d2c61511517258c3ca50db341ad015c3d253a8cf367"} Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.402476 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/54fabb6b-81ad-4481-ac7e-9469f288be63-monitoring-plugin-cert\") pod \"monitoring-plugin-87cf6ccb5-ntndd\" (UID: \"54fabb6b-81ad-4481-ac7e-9469f288be63\") " pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.504105 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/54fabb6b-81ad-4481-ac7e-9469f288be63-monitoring-plugin-cert\") pod \"monitoring-plugin-87cf6ccb5-ntndd\" (UID: \"54fabb6b-81ad-4481-ac7e-9469f288be63\") " pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.509271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/54fabb6b-81ad-4481-ac7e-9469f288be63-monitoring-plugin-cert\") pod \"monitoring-plugin-87cf6ccb5-ntndd\" (UID: \"54fabb6b-81ad-4481-ac7e-9469f288be63\") " pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.604934 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.733268 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-6d8f7b4658-vq858"] Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.847252 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd"] Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.859572 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 26 22:26:24 crc kubenswrapper[4903]: W1126 22:26:24.859980 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54fabb6b_81ad_4481_ac7e_9469f288be63.slice/crio-11c276067c02fcaf2421ef263510d2cd9005402bd6da38155ebb381c0849ec4c WatchSource:0}: Error finding container 11c276067c02fcaf2421ef263510d2cd9005402bd6da38155ebb381c0849ec4c: Status 404 returned error can't find the container with id 11c276067c02fcaf2421ef263510d2cd9005402bd6da38155ebb381c0849ec4c Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.863181 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.867615 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-e9tj3rmasj09h" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.867761 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.871072 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.871761 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.871861 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.872059 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.872150 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.872237 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.872344 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.873549 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.876037 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.878111 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 26 22:26:24 
crc kubenswrapper[4903]: I1126 22:26:24.878238 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-2pcg5" Nov 26 22:26:24 crc kubenswrapper[4903]: I1126 22:26:24.887284 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012453 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpxrt\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-kube-api-access-wpxrt\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012826 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012857 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012881 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012905 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.012989 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013017 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013038 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-metrics-client-certs\") pod 
\"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013067 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013088 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013187 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013208 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013225 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013283 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013304 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013325 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013366 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.013389 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114432 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114490 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114516 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114539 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114577 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114612 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114648 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114726 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114836 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114875 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114915 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114952 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.114992 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpxrt\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-kube-api-access-wpxrt\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.115028 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.115071 4903 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.115118 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.115182 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.116422 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.116796 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.116844 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.117172 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.121170 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.121667 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.122109 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0ab6961-7182-4a45-bc22-bf1ec633dcae-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.127219 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.127937 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0ab6961-7182-4a45-bc22-bf1ec633dcae-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.128125 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.128773 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.128947 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.129136 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.129166 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.129515 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.129723 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: 
\"kubernetes.io/secret/b0ab6961-7182-4a45-bc22-bf1ec633dcae-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.134857 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpxrt\" (UniqueName: \"kubernetes.io/projected/b0ab6961-7182-4a45-bc22-bf1ec633dcae-kube-api-access-wpxrt\") pod \"prometheus-k8s-0\" (UID: \"b0ab6961-7182-4a45-bc22-bf1ec633dcae\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.193562 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.387751 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" event={"ID":"c2d272dc-c249-4079-acc3-c218cf167a1e","Type":"ContainerStarted","Data":"5e648b809a4e84cfbcd51b0f14a72f42ab6cbc978b963b0b4413a403fc44d4e8"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.392849 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-869cd9b9c9-pnxtx" event={"ID":"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8","Type":"ContainerStarted","Data":"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.392906 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-869cd9b9c9-pnxtx" event={"ID":"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8","Type":"ContainerStarted","Data":"30c0914b34db8819f98239558304a9dc8c3f4e04559d95fef0e1ada258befe77"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.399974 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"687486e305e7a99323f4882ef59b1e78ca8888be1bf7904de1d8dfefb518907c"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.400009 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"f0198c2fca83bdf09a33979db96ca73cfa9756b01352e30de6c8bfcb2466ec0f"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.400022 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"085370004edc2cc3d908e4a28fb00ce6c3243a32fcee22510d547c5d0be8641a"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.401886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" event={"ID":"54fabb6b-81ad-4481-ac7e-9469f288be63","Type":"ContainerStarted","Data":"11c276067c02fcaf2421ef263510d2cd9005402bd6da38155ebb381c0849ec4c"} Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.413641 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-869cd9b9c9-pnxtx" podStartSLOduration=2.413594896 podStartE2EDuration="2.413594896s" podCreationTimestamp="2025-11-26 22:26:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:26:25.408846376 +0000 UTC m=+314.099081296" 
watchObservedRunningTime="2025-11-26 22:26:25.413594896 +0000 UTC m=+314.103829816" Nov 26 22:26:25 crc kubenswrapper[4903]: I1126 22:26:25.700411 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 26 22:26:26 crc kubenswrapper[4903]: I1126 22:26:26.409440 4903 generic.go:334] "Generic (PLEG): container finished" podID="b0ab6961-7182-4a45-bc22-bf1ec633dcae" containerID="89578b73db5c631cbc86c9ac619842b16b4e578f83d36a0a7fba6981207ac8d0" exitCode=0 Nov 26 22:26:26 crc kubenswrapper[4903]: I1126 22:26:26.409499 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerDied","Data":"89578b73db5c631cbc86c9ac619842b16b4e578f83d36a0a7fba6981207ac8d0"} Nov 26 22:26:26 crc kubenswrapper[4903]: I1126 22:26:26.409852 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"857f97f720d6cdb650a982c27ea84db5de66152e0ef2d7f3122a6fa5950198a7"} Nov 26 22:26:28 crc kubenswrapper[4903]: I1126 22:26:28.259669 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-l9g4x" Nov 26 22:26:28 crc kubenswrapper[4903]: I1126 22:26:28.351627 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.436304 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"e9ea17d356a64ac7f435d693039a68d51faf88be35f05d74d596d73a676e00bb"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.436770 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"76514a033bf2404774f6a248d247b5254f818e7fec2b9c685ab3d9dcb858f70e"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.436780 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"1d0225badeebf1b4246afead435d778b2a8355222316592ed85082b1b78d9393"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.442312 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"e59c2eccc39469aab5ae123718df3848df3dc5a5cb8dcbc7d5964845da8bae15"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.442351 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"2d4dcd989e7020b7175c2e2d1f900d3989cac37d07a23ed3cfc1fd1f35a24db4"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.442363 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" event={"ID":"224db6b5-404e-4a5f-be90-342998388a71","Type":"ContainerStarted","Data":"71dc165c6721a5677892be92f770a803a9fa62c94b221647986a9cb9ddf058df"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.442586 4903 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.444246 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" event={"ID":"54fabb6b-81ad-4481-ac7e-9469f288be63","Type":"ContainerStarted","Data":"d3e8f38e247da2ce7954a657a9afaa9152d7ab0e3caedc0802e39ae3ef4f383d"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.445999 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.447144 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" event={"ID":"c2d272dc-c249-4079-acc3-c218cf167a1e","Type":"ContainerStarted","Data":"efa5785bfc6cdab8c72a107ab2591f9e7f73a011a6e06b1c23be7b9b7665ba5b"} Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.452042 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.456212 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.487109 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-6c49876-bwx7j" podStartSLOduration=1.962319852 podStartE2EDuration="9.487094639s" podCreationTimestamp="2025-11-26 22:26:20 +0000 UTC" firstStartedPulling="2025-11-26 22:26:21.39228755 +0000 UTC m=+310.082522470" lastFinishedPulling="2025-11-26 22:26:28.917062327 +0000 UTC m=+317.607297257" observedRunningTime="2025-11-26 22:26:29.4633658 +0000 UTC m=+318.153600720" watchObservedRunningTime="2025-11-26 22:26:29.487094639 +0000 UTC m=+318.177329549" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.540530 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" podStartSLOduration=2.39146954 podStartE2EDuration="6.54050681s" podCreationTimestamp="2025-11-26 22:26:23 +0000 UTC" firstStartedPulling="2025-11-26 22:26:24.756088171 +0000 UTC m=+313.446323081" lastFinishedPulling="2025-11-26 22:26:28.905125431 +0000 UTC m=+317.595360351" observedRunningTime="2025-11-26 22:26:29.527597187 +0000 UTC m=+318.217832097" watchObservedRunningTime="2025-11-26 22:26:29.54050681 +0000 UTC m=+318.230741720" Nov 26 22:26:29 crc kubenswrapper[4903]: I1126 22:26:29.552653 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-87cf6ccb5-ntndd" podStartSLOduration=1.494372735 podStartE2EDuration="5.552631792s" podCreationTimestamp="2025-11-26 22:26:24 +0000 UTC" firstStartedPulling="2025-11-26 22:26:24.87452304 +0000 UTC m=+313.564757950" lastFinishedPulling="2025-11-26 22:26:28.932782097 +0000 UTC m=+317.623017007" observedRunningTime="2025-11-26 22:26:29.544070168 +0000 UTC m=+318.234305088" watchObservedRunningTime="2025-11-26 22:26:29.552631792 +0000 UTC m=+318.242866702" Nov 26 22:26:30 crc kubenswrapper[4903]: I1126 22:26:30.457429 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"f845c87b3e4f72d17eb6e3e7778b7b8e7a247f013cccb0d68eadf07ffe2858ff"} Nov 26 
22:26:30 crc kubenswrapper[4903]: I1126 22:26:30.457499 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"f36c64c9371228c49a5966f81b3c137378531cb71bc9e0b8d222c8d4a832fc53"} Nov 26 22:26:30 crc kubenswrapper[4903]: I1126 22:26:30.457517 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"cab70503-fab1-47f5-86d6-c6181d0fc1a3","Type":"ContainerStarted","Data":"a1fa59e7c9efe7428c431c6746aca504792bf9d9cfc0e409ff616c36a2c3af72"} Nov 26 22:26:30 crc kubenswrapper[4903]: I1126 22:26:30.490629 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=2.785075685 podStartE2EDuration="11.490605929s" podCreationTimestamp="2025-11-26 22:26:19 +0000 UTC" firstStartedPulling="2025-11-26 22:26:20.225815323 +0000 UTC m=+308.916050233" lastFinishedPulling="2025-11-26 22:26:28.931345567 +0000 UTC m=+317.621580477" observedRunningTime="2025-11-26 22:26:30.487150474 +0000 UTC m=+319.177385404" watchObservedRunningTime="2025-11-26 22:26:30.490605929 +0000 UTC m=+319.180840849" Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"a17aa736b692005c2cc6d50ecc81c9a1fd32ae179ee37237a42a3fca13daf880"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479738 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"d5a73797be79ded4489d7048a478b98122bfa002f6d775f458776d0717b41340"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479749 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"7b106199291aa0a0ceb823ad12467c9fc72fed1db2b5ff48afe2f35006686bcb"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479759 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"59d20b1db4b6d5c8c2150289ac99d9a95583f3ccd9cd1fa773e3098cb1bc2cba"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479768 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"3b5b44a23859a48bf2369d3149a1d233ceec5c00a293616f91e1bc83af43cc6f"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.479777 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0ab6961-7182-4a45-bc22-bf1ec633dcae","Type":"ContainerStarted","Data":"306ede3973e2b6adf0c5102c418d6b395c1b3b64678bf72e2fec43b24afc563c"} Nov 26 22:26:32 crc kubenswrapper[4903]: I1126 22:26:32.526527 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=3.527645592 podStartE2EDuration="8.526507197s" podCreationTimestamp="2025-11-26 22:26:24 +0000 UTC" firstStartedPulling="2025-11-26 22:26:26.410538186 +0000 UTC m=+315.100773096" lastFinishedPulling="2025-11-26 
22:26:31.409399781 +0000 UTC m=+320.099634701" observedRunningTime="2025-11-26 22:26:32.516158475 +0000 UTC m=+321.206393385" watchObservedRunningTime="2025-11-26 22:26:32.526507197 +0000 UTC m=+321.216742117" Nov 26 22:26:33 crc kubenswrapper[4903]: I1126 22:26:33.634934 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:33 crc kubenswrapper[4903]: I1126 22:26:33.635354 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:33 crc kubenswrapper[4903]: I1126 22:26:33.644034 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:34 crc kubenswrapper[4903]: I1126 22:26:34.501824 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:26:34 crc kubenswrapper[4903]: I1126 22:26:34.589685 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:26:35 crc kubenswrapper[4903]: I1126 22:26:35.195807 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:26:44 crc kubenswrapper[4903]: I1126 22:26:44.100503 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:44 crc kubenswrapper[4903]: I1126 22:26:44.101260 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:26:53 crc kubenswrapper[4903]: I1126 22:26:53.414466 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" podUID="946ffb39-1ab9-4606-aeba-77e75d32fa17" containerName="registry" containerID="cri-o://236d33ede4cdc7114d7bf73ae508b5fbeca9f01b32d5920a69a60321053468e3" gracePeriod=30 Nov 26 22:26:53 crc kubenswrapper[4903]: I1126 22:26:53.658389 4903 generic.go:334] "Generic (PLEG): container finished" podID="946ffb39-1ab9-4606-aeba-77e75d32fa17" containerID="236d33ede4cdc7114d7bf73ae508b5fbeca9f01b32d5920a69a60321053468e3" exitCode=0 Nov 26 22:26:53 crc kubenswrapper[4903]: I1126 22:26:53.658442 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" event={"ID":"946ffb39-1ab9-4606-aeba-77e75d32fa17","Type":"ContainerDied","Data":"236d33ede4cdc7114d7bf73ae508b5fbeca9f01b32d5920a69a60321053468e3"} Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.376846 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551193 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8bcv\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551293 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551313 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551357 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551386 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551411 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551486 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.551621 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"946ffb39-1ab9-4606-aeba-77e75d32fa17\" (UID: \"946ffb39-1ab9-4606-aeba-77e75d32fa17\") " Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.552852 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.553117 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.558896 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.559918 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv" (OuterVolumeSpecName: "kube-api-access-g8bcv") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "kube-api-access-g8bcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.561573 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.561940 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.568631 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.574954 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "946ffb39-1ab9-4606-aeba-77e75d32fa17" (UID: "946ffb39-1ab9-4606-aeba-77e75d32fa17"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653255 4903 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653321 4903 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653343 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653363 4903 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/946ffb39-1ab9-4606-aeba-77e75d32fa17-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653382 4903 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/946ffb39-1ab9-4606-aeba-77e75d32fa17-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653400 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8bcv\" (UniqueName: \"kubernetes.io/projected/946ffb39-1ab9-4606-aeba-77e75d32fa17-kube-api-access-g8bcv\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.653417 4903 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/946ffb39-1ab9-4606-aeba-77e75d32fa17-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.667660 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" event={"ID":"946ffb39-1ab9-4606-aeba-77e75d32fa17","Type":"ContainerDied","Data":"aed33e18f6006586cc084039051a2f8afd00447d40a1179ab90e7d8cc4b3c0ca"} Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.667773 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-qb8qh" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.668178 4903 scope.go:117] "RemoveContainer" containerID="236d33ede4cdc7114d7bf73ae508b5fbeca9f01b32d5920a69a60321053468e3" Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.725235 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:26:54 crc kubenswrapper[4903]: I1126 22:26:54.728981 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-qb8qh"] Nov 26 22:26:56 crc kubenswrapper[4903]: I1126 22:26:56.042050 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="946ffb39-1ab9-4606-aeba-77e75d32fa17" path="/var/lib/kubelet/pods/946ffb39-1ab9-4606-aeba-77e75d32fa17/volumes" Nov 26 22:26:59 crc kubenswrapper[4903]: I1126 22:26:59.638469 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-sfnff" podUID="1293736c-513c-490e-afb1-97df72e3e51c" containerName="console" containerID="cri-o://0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f" gracePeriod=15 Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.151457 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-sfnff_1293736c-513c-490e-afb1-97df72e3e51c/console/0.log" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.152109 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.343633 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.343782 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.343843 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2mdb\" (UniqueName: \"kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.343894 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.343950 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.344013 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.344087 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config\") pod \"1293736c-513c-490e-afb1-97df72e3e51c\" (UID: \"1293736c-513c-490e-afb1-97df72e3e51c\") " Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.344737 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca" (OuterVolumeSpecName: "service-ca") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.344775 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config" (OuterVolumeSpecName: "console-config") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.344808 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.345033 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.349685 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.349989 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.368553 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb" (OuterVolumeSpecName: "kube-api-access-f2mdb") pod "1293736c-513c-490e-afb1-97df72e3e51c" (UID: "1293736c-513c-490e-afb1-97df72e3e51c"). InnerVolumeSpecName "kube-api-access-f2mdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446347 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446386 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446401 4903 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446415 4903 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446427 4903 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1293736c-513c-490e-afb1-97df72e3e51c-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446438 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2mdb\" (UniqueName: \"kubernetes.io/projected/1293736c-513c-490e-afb1-97df72e3e51c-kube-api-access-f2mdb\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.446450 4903 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1293736c-513c-490e-afb1-97df72e3e51c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.717808 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-sfnff_1293736c-513c-490e-afb1-97df72e3e51c/console/0.log" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.717866 4903 generic.go:334] "Generic (PLEG): container finished" podID="1293736c-513c-490e-afb1-97df72e3e51c" containerID="0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f" exitCode=2 Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.717902 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sfnff" event={"ID":"1293736c-513c-490e-afb1-97df72e3e51c","Type":"ContainerDied","Data":"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f"} Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.717931 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-sfnff" event={"ID":"1293736c-513c-490e-afb1-97df72e3e51c","Type":"ContainerDied","Data":"93994046d4391371554c217ef1cfa80caec2a5ba801ddbe3bf292392733b4bcd"} Nov 26 22:27:00 
crc kubenswrapper[4903]: I1126 22:27:00.717955 4903 scope.go:117] "RemoveContainer" containerID="0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.717981 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-sfnff" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.750799 4903 scope.go:117] "RemoveContainer" containerID="0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f" Nov 26 22:27:00 crc kubenswrapper[4903]: E1126 22:27:00.751798 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f\": container with ID starting with 0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f not found: ID does not exist" containerID="0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.751842 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f"} err="failed to get container status \"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f\": rpc error: code = NotFound desc = could not find container \"0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f\": container with ID starting with 0c65a672201deda910bae1751acee4707d74e6e324dc902e6ac61917271d791f not found: ID does not exist" Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.762730 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:27:00 crc kubenswrapper[4903]: I1126 22:27:00.767821 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-sfnff"] Nov 26 22:27:02 crc kubenswrapper[4903]: I1126 22:27:02.040466 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1293736c-513c-490e-afb1-97df72e3e51c" path="/var/lib/kubelet/pods/1293736c-513c-490e-afb1-97df72e3e51c/volumes" Nov 26 22:27:04 crc kubenswrapper[4903]: I1126 22:27:04.109983 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:27:04 crc kubenswrapper[4903]: I1126 22:27:04.123227 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-6d8f7b4658-vq858" Nov 26 22:27:25 crc kubenswrapper[4903]: I1126 22:27:25.195986 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:27:25 crc kubenswrapper[4903]: I1126 22:27:25.242844 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:27:25 crc kubenswrapper[4903]: I1126 22:27:25.952079 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Nov 26 22:27:31 crc kubenswrapper[4903]: I1126 22:27:31.981000 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:27:31 crc kubenswrapper[4903]: I1126 22:27:31.981853 4903 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.621329 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:27:47 crc kubenswrapper[4903]: E1126 22:27:47.622764 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1293736c-513c-490e-afb1-97df72e3e51c" containerName="console" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.622818 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1293736c-513c-490e-afb1-97df72e3e51c" containerName="console" Nov 26 22:27:47 crc kubenswrapper[4903]: E1126 22:27:47.622856 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946ffb39-1ab9-4606-aeba-77e75d32fa17" containerName="registry" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.622873 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="946ffb39-1ab9-4606-aeba-77e75d32fa17" containerName="registry" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.623143 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1293736c-513c-490e-afb1-97df72e3e51c" containerName="console" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.623182 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="946ffb39-1ab9-4606-aeba-77e75d32fa17" containerName="registry" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.624163 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673535 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673585 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673658 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673679 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673725 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxnkj\" (UniqueName: \"kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673770 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.673790 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.686898 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774425 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774465 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774505 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774522 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774542 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxnkj\" (UniqueName: \"kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774581 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.774598 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.775941 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.775952 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.776254 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.776486 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.779529 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.779971 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.791564 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxnkj\" (UniqueName: \"kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj\") pod \"console-5dc67597bd-6lscg\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:47 crc kubenswrapper[4903]: I1126 22:27:47.957386 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:48 crc kubenswrapper[4903]: I1126 22:27:48.207507 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:27:49 crc kubenswrapper[4903]: I1126 22:27:49.081289 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dc67597bd-6lscg" event={"ID":"1c981989-99f6-4954-b225-9182997e82e0","Type":"ContainerStarted","Data":"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a"} Nov 26 22:27:49 crc kubenswrapper[4903]: I1126 22:27:49.081615 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dc67597bd-6lscg" event={"ID":"1c981989-99f6-4954-b225-9182997e82e0","Type":"ContainerStarted","Data":"5534bedf413c681f318678398be23daccf0dec6d0c6d74f61cf9bb3b27a51c4a"} Nov 26 22:27:49 crc kubenswrapper[4903]: I1126 22:27:49.109098 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5dc67597bd-6lscg" podStartSLOduration=2.109073959 podStartE2EDuration="2.109073959s" podCreationTimestamp="2025-11-26 22:27:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:27:49.106915491 +0000 UTC m=+397.797150431" watchObservedRunningTime="2025-11-26 22:27:49.109073959 +0000 UTC m=+397.799308899" Nov 26 22:27:57 crc kubenswrapper[4903]: I1126 22:27:57.957664 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:57 crc kubenswrapper[4903]: I1126 22:27:57.958472 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:57 crc kubenswrapper[4903]: I1126 22:27:57.967181 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:58 crc kubenswrapper[4903]: I1126 22:27:58.162318 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:27:58 crc kubenswrapper[4903]: I1126 22:27:58.232442 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:28:01 crc kubenswrapper[4903]: I1126 22:28:01.981667 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:28:01 crc kubenswrapper[4903]: I1126 22:28:01.982388 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.286778 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-869cd9b9c9-pnxtx" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerName="console" containerID="cri-o://a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210" gracePeriod=15 Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.732872 4903 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-console_console-869cd9b9c9-pnxtx_984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8/console/0.log" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.733228 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.854767 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.854944 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.855055 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.855127 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bzzb\" (UniqueName: \"kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.855196 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.855251 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.855287 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config\") pod \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\" (UID: \"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8\") " Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.856376 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config" (OuterVolumeSpecName: "console-config") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.856734 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca" (OuterVolumeSpecName: "service-ca") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.856925 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.857069 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.864964 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.865378 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb" (OuterVolumeSpecName: "kube-api-access-9bzzb") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "kube-api-access-9bzzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.868096 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" (UID: "984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957296 4903 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957791 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957830 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bzzb\" (UniqueName: \"kubernetes.io/projected/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-kube-api-access-9bzzb\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957852 4903 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957870 4903 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957886 4903 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:23 crc kubenswrapper[4903]: I1126 22:28:23.957904 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.348924 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-869cd9b9c9-pnxtx_984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8/console/0.log" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.349084 4903 generic.go:334] "Generic (PLEG): container finished" podID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerID="a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210" exitCode=2 Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.349145 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-869cd9b9c9-pnxtx" event={"ID":"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8","Type":"ContainerDied","Data":"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210"} Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.349190 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-869cd9b9c9-pnxtx" event={"ID":"984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8","Type":"ContainerDied","Data":"30c0914b34db8819f98239558304a9dc8c3f4e04559d95fef0e1ada258befe77"} Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.349208 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-869cd9b9c9-pnxtx" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.349242 4903 scope.go:117] "RemoveContainer" containerID="a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.378658 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.384921 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-869cd9b9c9-pnxtx"] Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.388444 4903 scope.go:117] "RemoveContainer" containerID="a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210" Nov 26 22:28:24 crc kubenswrapper[4903]: E1126 22:28:24.389103 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210\": container with ID starting with a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210 not found: ID does not exist" containerID="a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.389155 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210"} err="failed to get container status \"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210\": rpc error: code = NotFound desc = could not find container \"a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210\": container with ID starting with a603866dd9ad482c5a6b168676dac6de6e309f1d336497d23614f3efbe785210 not found: ID does not exist" Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.634793 4903 patch_prober.go:28] interesting pod/console-869cd9b9c9-pnxtx container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/health\": dial tcp 10.217.0.69:8443: i/o timeout" start-of-body= Nov 26 22:28:24 crc kubenswrapper[4903]: I1126 22:28:24.634873 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-869cd9b9c9-pnxtx" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerName="console" probeResult="failure" output="Get \"https://10.217.0.69:8443/health\": dial tcp 10.217.0.69:8443: i/o timeout" Nov 26 22:28:26 crc kubenswrapper[4903]: I1126 22:28:26.038147 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" path="/var/lib/kubelet/pods/984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8/volumes" Nov 26 22:28:31 crc kubenswrapper[4903]: I1126 22:28:31.980923 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:28:31 crc kubenswrapper[4903]: I1126 22:28:31.981272 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:28:31 crc kubenswrapper[4903]: I1126 22:28:31.981330 4903 
kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:28:31 crc kubenswrapper[4903]: I1126 22:28:31.982387 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:28:31 crc kubenswrapper[4903]: I1126 22:28:31.982478 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37" gracePeriod=600 Nov 26 22:28:32 crc kubenswrapper[4903]: I1126 22:28:32.422195 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37" exitCode=0 Nov 26 22:28:32 crc kubenswrapper[4903]: I1126 22:28:32.422310 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37"} Nov 26 22:28:32 crc kubenswrapper[4903]: I1126 22:28:32.422758 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4"} Nov 26 22:28:32 crc kubenswrapper[4903]: I1126 22:28:32.422848 4903 scope.go:117] "RemoveContainer" containerID="0a5898b94c2291cf57b3006925719207e2e7698d44733dd218aadd55be604740" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.150498 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k"] Nov 26 22:30:00 crc kubenswrapper[4903]: E1126 22:30:00.151466 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerName="console" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.151489 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerName="console" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.151735 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="984e95b9-15e6-4fc2-a1e7-9c0dbfa620e8" containerName="console" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.152366 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.154900 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.155561 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.179076 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k"] Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.182466 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.182536 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pbsq\" (UniqueName: \"kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.182617 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.283969 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.284049 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pbsq\" (UniqueName: \"kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.284079 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.284965 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume\") pod 
\"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.289992 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.310567 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pbsq\" (UniqueName: \"kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq\") pod \"collect-profiles-29403270-2527k\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.473633 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:00 crc kubenswrapper[4903]: I1126 22:30:00.721074 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k"] Nov 26 22:30:01 crc kubenswrapper[4903]: I1126 22:30:01.165771 4903 generic.go:334] "Generic (PLEG): container finished" podID="77e6fff5-e9a3-46c5-98b8-b0085f5de807" containerID="613653e85df9f97e0f794a2d98e8f23047b3f69ac4a2fac47d49382033d1875b" exitCode=0 Nov 26 22:30:01 crc kubenswrapper[4903]: I1126 22:30:01.165849 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" event={"ID":"77e6fff5-e9a3-46c5-98b8-b0085f5de807","Type":"ContainerDied","Data":"613653e85df9f97e0f794a2d98e8f23047b3f69ac4a2fac47d49382033d1875b"} Nov 26 22:30:01 crc kubenswrapper[4903]: I1126 22:30:01.165893 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" event={"ID":"77e6fff5-e9a3-46c5-98b8-b0085f5de807","Type":"ContainerStarted","Data":"a833a4f9c63406fc40276d40a6cc06e6a85248aefac8d0515f47ec4114777b68"} Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.512949 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.528230 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume\") pod \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.528900 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pbsq\" (UniqueName: \"kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq\") pod \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.529106 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume\") pod \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\" (UID: \"77e6fff5-e9a3-46c5-98b8-b0085f5de807\") " Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.529675 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume" (OuterVolumeSpecName: "config-volume") pod "77e6fff5-e9a3-46c5-98b8-b0085f5de807" (UID: "77e6fff5-e9a3-46c5-98b8-b0085f5de807"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.537021 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq" (OuterVolumeSpecName: "kube-api-access-9pbsq") pod "77e6fff5-e9a3-46c5-98b8-b0085f5de807" (UID: "77e6fff5-e9a3-46c5-98b8-b0085f5de807"). InnerVolumeSpecName "kube-api-access-9pbsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.538856 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "77e6fff5-e9a3-46c5-98b8-b0085f5de807" (UID: "77e6fff5-e9a3-46c5-98b8-b0085f5de807"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.634469 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77e6fff5-e9a3-46c5-98b8-b0085f5de807-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.634836 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77e6fff5-e9a3-46c5-98b8-b0085f5de807-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:02 crc kubenswrapper[4903]: I1126 22:30:02.634902 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pbsq\" (UniqueName: \"kubernetes.io/projected/77e6fff5-e9a3-46c5-98b8-b0085f5de807-kube-api-access-9pbsq\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:03 crc kubenswrapper[4903]: I1126 22:30:03.183206 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" event={"ID":"77e6fff5-e9a3-46c5-98b8-b0085f5de807","Type":"ContainerDied","Data":"a833a4f9c63406fc40276d40a6cc06e6a85248aefac8d0515f47ec4114777b68"} Nov 26 22:30:03 crc kubenswrapper[4903]: I1126 22:30:03.183563 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a833a4f9c63406fc40276d40a6cc06e6a85248aefac8d0515f47ec4114777b68" Nov 26 22:30:03 crc kubenswrapper[4903]: I1126 22:30:03.183327 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.705201 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85"] Nov 26 22:30:49 crc kubenswrapper[4903]: E1126 22:30:49.706205 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e6fff5-e9a3-46c5-98b8-b0085f5de807" containerName="collect-profiles" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.706226 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e6fff5-e9a3-46c5-98b8-b0085f5de807" containerName="collect-profiles" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.706442 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e6fff5-e9a3-46c5-98b8-b0085f5de807" containerName="collect-profiles" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.707821 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.710158 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.721394 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85"] Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.860371 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.860524 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.860593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2kzz\" (UniqueName: \"kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.961638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.961760 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2kzz\" (UniqueName: \"kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.961891 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.962351 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.962586 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:49 crc kubenswrapper[4903]: I1126 22:30:49.989516 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2kzz\" (UniqueName: \"kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:50 crc kubenswrapper[4903]: I1126 22:30:50.078762 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:50 crc kubenswrapper[4903]: I1126 22:30:50.338026 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85"] Nov 26 22:30:50 crc kubenswrapper[4903]: W1126 22:30:50.347863 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3a3d04f_4e15_4207_ab86_0a9c7f6da454.slice/crio-022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4 WatchSource:0}: Error finding container 022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4: Status 404 returned error can't find the container with id 022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4 Nov 26 22:30:50 crc kubenswrapper[4903]: I1126 22:30:50.526911 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" event={"ID":"b3a3d04f-4e15-4207-ab86-0a9c7f6da454","Type":"ContainerStarted","Data":"022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4"} Nov 26 22:30:51 crc kubenswrapper[4903]: I1126 22:30:51.538114 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerID="b1525ad79c12ffd0043eb42e7d349ae0382a46a0d67d15fbbcc8c20a68e87444" exitCode=0 Nov 26 22:30:51 crc kubenswrapper[4903]: I1126 22:30:51.538215 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" event={"ID":"b3a3d04f-4e15-4207-ab86-0a9c7f6da454","Type":"ContainerDied","Data":"b1525ad79c12ffd0043eb42e7d349ae0382a46a0d67d15fbbcc8c20a68e87444"} Nov 26 22:30:51 crc kubenswrapper[4903]: I1126 22:30:51.541306 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 22:30:53 crc kubenswrapper[4903]: I1126 22:30:53.564121 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" 
containerID="17097ad95647cac557ca7d687043269547f8d302b3a9241624dc72c394b4045f" exitCode=0 Nov 26 22:30:53 crc kubenswrapper[4903]: I1126 22:30:53.564220 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" event={"ID":"b3a3d04f-4e15-4207-ab86-0a9c7f6da454","Type":"ContainerDied","Data":"17097ad95647cac557ca7d687043269547f8d302b3a9241624dc72c394b4045f"} Nov 26 22:30:54 crc kubenswrapper[4903]: I1126 22:30:54.575527 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerID="7ce3d935b55253d404a419f9de8e89e93471a21910678e1a0e2aa9253965ece7" exitCode=0 Nov 26 22:30:54 crc kubenswrapper[4903]: I1126 22:30:54.575608 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" event={"ID":"b3a3d04f-4e15-4207-ab86-0a9c7f6da454","Type":"ContainerDied","Data":"7ce3d935b55253d404a419f9de8e89e93471a21910678e1a0e2aa9253965ece7"} Nov 26 22:30:55 crc kubenswrapper[4903]: I1126 22:30:55.923613 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.064109 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle\") pod \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.064272 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util\") pod \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.064367 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2kzz\" (UniqueName: \"kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz\") pod \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\" (UID: \"b3a3d04f-4e15-4207-ab86-0a9c7f6da454\") " Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.068519 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle" (OuterVolumeSpecName: "bundle") pod "b3a3d04f-4e15-4207-ab86-0a9c7f6da454" (UID: "b3a3d04f-4e15-4207-ab86-0a9c7f6da454"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.073384 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz" (OuterVolumeSpecName: "kube-api-access-n2kzz") pod "b3a3d04f-4e15-4207-ab86-0a9c7f6da454" (UID: "b3a3d04f-4e15-4207-ab86-0a9c7f6da454"). InnerVolumeSpecName "kube-api-access-n2kzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.088578 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util" (OuterVolumeSpecName: "util") pod "b3a3d04f-4e15-4207-ab86-0a9c7f6da454" (UID: "b3a3d04f-4e15-4207-ab86-0a9c7f6da454"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.166961 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.167018 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2kzz\" (UniqueName: \"kubernetes.io/projected/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-kube-api-access-n2kzz\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.167045 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b3a3d04f-4e15-4207-ab86-0a9c7f6da454-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.609072 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" event={"ID":"b3a3d04f-4e15-4207-ab86-0a9c7f6da454","Type":"ContainerDied","Data":"022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4"} Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.609132 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="022a36dedcd3fb5b50561558f030512547662537004789e63c5ae628b5ee2ce4" Nov 26 22:30:56 crc kubenswrapper[4903]: I1126 22:30:56.609150 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85" Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.902741 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bbznt"] Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903413 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-controller" containerID="cri-o://8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903465 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="nbdb" containerID="cri-o://74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903541 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-node" containerID="cri-o://c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903514 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903596 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" 
podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-acl-logging" containerID="cri-o://e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903745 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="sbdb" containerID="cri-o://ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.903765 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="northd" containerID="cri-o://02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" gracePeriod=30 Nov 26 22:31:00 crc kubenswrapper[4903]: I1126 22:31:00.939855 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" containerID="cri-o://7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" gracePeriod=30 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.647537 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/2.log" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.648420 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/1.log" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.648463 4903 generic.go:334] "Generic (PLEG): container finished" podID="229974d7-7b78-434b-a346-8b9004e69bf2" containerID="a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0" exitCode=2 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.648519 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerDied","Data":"a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.648551 4903 scope.go:117] "RemoveContainer" containerID="969475c82879642645019fdc1665aaf0dc4c167c07887a47d02b4d5be0b6d66a" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.649279 4903 scope.go:117] "RemoveContainer" containerID="a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0" Nov 26 22:31:01 crc kubenswrapper[4903]: E1126 22:31:01.649651 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bxnsh_openshift-multus(229974d7-7b78-434b-a346-8b9004e69bf2)\"" pod="openshift-multus/multus-bxnsh" podUID="229974d7-7b78-434b-a346-8b9004e69bf2" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.652180 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovnkube-controller/3.log" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.654467 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-acl-logging/0.log" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655169 4903 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-controller/0.log" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655822 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" exitCode=0 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655841 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" exitCode=0 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655850 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" exitCode=0 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655858 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" exitCode=0 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655864 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" exitCode=143 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655871 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" exitCode=143 Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655888 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655912 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655922 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655933 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655953 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.655962 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" 
event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0"} Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.677428 4903 scope.go:117] "RemoveContainer" containerID="93ba50c6aca389bf5400db19615fbeb2a035b284237a1fdbfa1d6e7d4190d06f" Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.980961 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:31:01 crc kubenswrapper[4903]: I1126 22:31:01.981016 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.070328 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-acl-logging/0.log" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.070798 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-controller/0.log" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.071258 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159482 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159528 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159550 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159569 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159595 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mpcc\" (UniqueName: \"kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: 
\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159615 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159644 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159664 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159677 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159712 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159740 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159757 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159777 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159791 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159809 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: 
\"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159849 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159864 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159880 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159896 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159911 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159961 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159979 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config\") pod \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\" (UID: \"ef55a921-a95f-4b2b-84b7-98c1082a1bb6\") " Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.159994 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160014 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). 
InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160032 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160273 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160325 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160444 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160480 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log" (OuterVolumeSpecName: "node-log") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160522 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160618 4903 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160632 4903 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160642 4903 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160650 4903 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160659 4903 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160667 4903 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160675 4903 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160684 4903 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160708 4903 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160719 4903 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160731 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160788 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash" (OuterVolumeSpecName: "host-slash") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). 
InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160815 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.160834 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket" (OuterVolumeSpecName: "log-socket") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.161001 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.161050 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.161269 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.172089 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.172248 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc" (OuterVolumeSpecName: "kube-api-access-8mpcc") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "kube-api-access-8mpcc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.179301 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "ef55a921-a95f-4b2b-84b7-98c1082a1bb6" (UID: "ef55a921-a95f-4b2b-84b7-98c1082a1bb6"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183023 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6wb7c"] Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183292 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-acl-logging" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183348 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-acl-logging" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183418 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183469 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183520 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="extract" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183569 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="extract" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183620 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="northd" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183667 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="northd" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183732 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="nbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183785 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="nbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183835 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183883 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.183932 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.183978 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184028 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184076 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184123 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184166 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184212 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184258 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184303 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-node" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184352 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-node" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184402 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="util" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184445 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="util" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184492 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="pull" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184535 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="pull" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184583 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="sbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184627 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="sbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184675 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184736 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: E1126 22:31:02.184783 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kubecfg-setup" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184826 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kubecfg-setup" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.184999 4903 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-node" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185050 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-acl-logging" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185109 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="sbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185156 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185207 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185251 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3a3d04f-4e15-4207-ab86-0a9c7f6da454" containerName="extract" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185303 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185348 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovn-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185396 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185441 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185488 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="nbdb" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185532 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="northd" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.185776 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerName="ovnkube-controller" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.187453 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.261782 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/20840f36-3f5a-425a-b019-9339828a3f27-ovn-node-metrics-cert\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262003 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262071 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-etc-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262180 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-bin\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262262 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-ovn\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262333 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-script-lib\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262402 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-log-socket\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262471 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-netd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262531 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-slash\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262602 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-systemd-units\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262676 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262780 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-config\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262855 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-systemd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262926 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-var-lib-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.262990 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263072 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-env-overrides\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263139 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-netns\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263293 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-kubelet\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263359 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-node-log\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263423 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8mz7\" (UniqueName: \"kubernetes.io/projected/20840f36-3f5a-425a-b019-9339828a3f27-kube-api-access-x8mz7\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263505 4903 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263558 4903 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263610 4903 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263663 4903 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263728 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mpcc\" (UniqueName: \"kubernetes.io/projected/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-kube-api-access-8mpcc\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263777 4903 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263822 4903 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263868 4903 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263922 4903 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-host-run-ovn-kubernetes\") on node \"crc\" 
DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.263970 4903 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ef55a921-a95f-4b2b-84b7-98c1082a1bb6-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-log-socket\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364669 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-netd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364699 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-slash\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364717 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-log-socket\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364722 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-systemd-units\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364791 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-netd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364838 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-systemd-units\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364840 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-slash\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364906 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364953 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-config\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364968 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-systemd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.364994 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-var-lib-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365012 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365094 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-env-overrides\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365125 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-kubelet\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365140 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-netns\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365164 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-node-log\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365194 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8mz7\" (UniqueName: 
\"kubernetes.io/projected/20840f36-3f5a-425a-b019-9339828a3f27-kube-api-access-x8mz7\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365214 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/20840f36-3f5a-425a-b019-9339828a3f27-ovn-node-metrics-cert\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365234 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365252 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-etc-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365267 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-bin\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365309 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-ovn\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365345 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-script-lib\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365500 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-node-log\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365517 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365566 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-cni-bin\") pod \"ovnkube-node-6wb7c\" 
(UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365584 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-etc-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365602 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-kubelet\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365606 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-ovn\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365601 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365619 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-run-netns\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365629 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-var-lib-openvswitch\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365655 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365679 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-config\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.365674 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/20840f36-3f5a-425a-b019-9339828a3f27-run-systemd\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 
22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.366001 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-env-overrides\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.366010 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/20840f36-3f5a-425a-b019-9339828a3f27-ovnkube-script-lib\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.368804 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/20840f36-3f5a-425a-b019-9339828a3f27-ovn-node-metrics-cert\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.395237 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8mz7\" (UniqueName: \"kubernetes.io/projected/20840f36-3f5a-425a-b019-9339828a3f27-kube-api-access-x8mz7\") pod \"ovnkube-node-6wb7c\" (UID: \"20840f36-3f5a-425a-b019-9339828a3f27\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.511577 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.681094 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/2.log" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.718023 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-acl-logging/0.log" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.735183 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-bbznt_ef55a921-a95f-4b2b-84b7-98c1082a1bb6/ovn-controller/0.log" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739261 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" exitCode=0 Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739299 4903 generic.go:334] "Generic (PLEG): container finished" podID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" containerID="c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" exitCode=0 Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739384 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364"} Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739411 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a"} Nov 26 22:31:02 
crc kubenswrapper[4903]: I1126 22:31:02.739429 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" event={"ID":"ef55a921-a95f-4b2b-84b7-98c1082a1bb6","Type":"ContainerDied","Data":"acdfc2b82174b43757c8586dd647dce4fd2a6a49a7cd07e8050250cb98884d88"} Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739445 4903 scope.go:117] "RemoveContainer" containerID="7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.739664 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-bbznt" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.773886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"85092ca6e339213a9ac67905de515463a3aa177fad7b5f165d0dfacbdf7855a4"} Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.794859 4903 scope.go:117] "RemoveContainer" containerID="ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.842490 4903 scope.go:117] "RemoveContainer" containerID="74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.874050 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bbznt"] Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.876285 4903 scope.go:117] "RemoveContainer" containerID="02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.880160 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-bbznt"] Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.903308 4903 scope.go:117] "RemoveContainer" containerID="c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.921003 4903 scope.go:117] "RemoveContainer" containerID="c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.933719 4903 scope.go:117] "RemoveContainer" containerID="e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.968916 4903 scope.go:117] "RemoveContainer" containerID="8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" Nov 26 22:31:02 crc kubenswrapper[4903]: I1126 22:31:02.992174 4903 scope.go:117] "RemoveContainer" containerID="7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.038019 4903 scope.go:117] "RemoveContainer" containerID="7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.044154 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5\": container with ID starting with 7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5 not found: ID does not exist" containerID="7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.044274 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5"} err="failed to get container status \"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5\": rpc error: code = NotFound desc = could not find container \"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5\": container with ID starting with 7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.044362 4903 scope.go:117] "RemoveContainer" containerID="ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.044663 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\": container with ID starting with ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f not found: ID does not exist" containerID="ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.044772 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f"} err="failed to get container status \"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\": rpc error: code = NotFound desc = could not find container \"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\": container with ID starting with ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.044852 4903 scope.go:117] "RemoveContainer" containerID="74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.045088 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\": container with ID starting with 74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca not found: ID does not exist" containerID="74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.045177 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca"} err="failed to get container status \"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\": rpc error: code = NotFound desc = could not find container \"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\": container with ID starting with 74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.045253 4903 scope.go:117] "RemoveContainer" containerID="02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.045499 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\": container with ID starting with 02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff not found: ID does not exist" 
containerID="02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.045598 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff"} err="failed to get container status \"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\": rpc error: code = NotFound desc = could not find container \"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\": container with ID starting with 02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.045674 4903 scope.go:117] "RemoveContainer" containerID="c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.049852 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\": container with ID starting with c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364 not found: ID does not exist" containerID="c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.049894 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364"} err="failed to get container status \"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\": rpc error: code = NotFound desc = could not find container \"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\": container with ID starting with c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.049925 4903 scope.go:117] "RemoveContainer" containerID="c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.053791 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\": container with ID starting with c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a not found: ID does not exist" containerID="c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.053889 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a"} err="failed to get container status \"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\": rpc error: code = NotFound desc = could not find container \"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\": container with ID starting with c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.053961 4903 scope.go:117] "RemoveContainer" containerID="e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.054204 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\": container with ID starting with e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a not found: ID does not exist" containerID="e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.054284 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a"} err="failed to get container status \"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\": rpc error: code = NotFound desc = could not find container \"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\": container with ID starting with e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.054355 4903 scope.go:117] "RemoveContainer" containerID="8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.054560 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\": container with ID starting with 8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0 not found: ID does not exist" containerID="8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.054642 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0"} err="failed to get container status \"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\": rpc error: code = NotFound desc = could not find container \"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\": container with ID starting with 8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.054718 4903 scope.go:117] "RemoveContainer" containerID="7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6" Nov 26 22:31:03 crc kubenswrapper[4903]: E1126 22:31:03.054983 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\": container with ID starting with 7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6 not found: ID does not exist" containerID="7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.055073 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6"} err="failed to get container status \"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\": rpc error: code = NotFound desc = could not find container \"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\": container with ID starting with 7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.055135 4903 scope.go:117] "RemoveContainer" containerID="7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5" Nov 26 22:31:03 crc 
kubenswrapper[4903]: I1126 22:31:03.055437 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5"} err="failed to get container status \"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5\": rpc error: code = NotFound desc = could not find container \"7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5\": container with ID starting with 7d13350527ea0ac731b90f25b1e6590105c42f27d90a4499a7f9b8eb8be911a5 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.055516 4903 scope.go:117] "RemoveContainer" containerID="ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.055748 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f"} err="failed to get container status \"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\": rpc error: code = NotFound desc = could not find container \"ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f\": container with ID starting with ecb70d479c2a5165ba90884ef97834063e7276fb528d359b22c4177b262a799f not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.055833 4903 scope.go:117] "RemoveContainer" containerID="74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056049 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca"} err="failed to get container status \"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\": rpc error: code = NotFound desc = could not find container \"74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca\": container with ID starting with 74e4bdb11ad9c24849b47cfef543a7b8080027f51dbac5b7bf1372e42bab49ca not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056165 4903 scope.go:117] "RemoveContainer" containerID="02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056462 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff"} err="failed to get container status \"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\": rpc error: code = NotFound desc = could not find container \"02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff\": container with ID starting with 02c2432df1af667596ba48caf965c227d83b3b4f200ca59efb48e3e373ed33ff not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056554 4903 scope.go:117] "RemoveContainer" containerID="c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056862 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364"} err="failed to get container status \"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\": rpc error: code = NotFound desc = could not find container \"c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364\": container with ID 
starting with c5ed8ce0a1694e958c09d034f41cc27924fd06c3994b1cf2ae688c98da0ef364 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.056897 4903 scope.go:117] "RemoveContainer" containerID="c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.057164 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a"} err="failed to get container status \"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\": rpc error: code = NotFound desc = could not find container \"c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a\": container with ID starting with c1d7325e04f49911538e95afcfc3d1b5762a4b4ae7fbe6dce0563f9c443ba77a not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.057194 4903 scope.go:117] "RemoveContainer" containerID="e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.057444 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a"} err="failed to get container status \"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\": rpc error: code = NotFound desc = could not find container \"e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a\": container with ID starting with e367a77acf94b02e3e958e309a7cd192e17f14272ffa386b58d54d3c5f41920a not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.057471 4903 scope.go:117] "RemoveContainer" containerID="8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.062845 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0"} err="failed to get container status \"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\": rpc error: code = NotFound desc = could not find container \"8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0\": container with ID starting with 8fcd1d83314b567863aac6480c657613fb4d4a72e1f9551999e7a3b3a15996c0 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.062882 4903 scope.go:117] "RemoveContainer" containerID="7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.066855 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6"} err="failed to get container status \"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\": rpc error: code = NotFound desc = could not find container \"7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6\": container with ID starting with 7368f583a2a883e44cab43f990d4b360e40dd6b3b3adc64a397849549fc6a2f6 not found: ID does not exist" Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.783542 4903 generic.go:334] "Generic (PLEG): container finished" podID="20840f36-3f5a-425a-b019-9339828a3f27" containerID="26e2d7e7662807871df9a8bdae26c79c88785bef8434cfccdffd2625fa0c803a" exitCode=0 Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784080 4903 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"17f824f505b29b08bb94bb111ae01418f0261fc8681f10ac491fb8061d2ac2ca"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784171 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"265c8a5a107f3e962e4537377c8c1d56c3aa28185e8d2671993f554660aea2b5"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784228 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"14a8536abd66676acdf334644388c5c46cd7fd6f56c0e192554a9e0564a5286a"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784281 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"62812996ef47dc0c75453335d08678f02619bef1a558c1ccf83352d6958de158"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784347 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"7cd3ce7620a3e1952c1a9a8d11f92d50f97f6a80661f36124ca4362cc55320a5"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784400 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"15fda709df9dd65eb76eb187ae4fb78477e201cda4f6f6d5ea6c8299d080ff25"} Nov 26 22:31:03 crc kubenswrapper[4903]: I1126 22:31:03.784457 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerDied","Data":"26e2d7e7662807871df9a8bdae26c79c88785bef8434cfccdffd2625fa0c803a"} Nov 26 22:31:04 crc kubenswrapper[4903]: I1126 22:31:04.037436 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef55a921-a95f-4b2b-84b7-98c1082a1bb6" path="/var/lib/kubelet/pods/ef55a921-a95f-4b2b-84b7-98c1082a1bb6/volumes" Nov 26 22:31:06 crc kubenswrapper[4903]: I1126 22:31:06.809646 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"aa192fb41340f9bd85e77a19aa667770bfb1984b3f2343f38973713e0cd3f2dd"} Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.734136 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d"] Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.734980 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.736605 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-kdmtw" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.736835 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.737813 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.782395 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b"] Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.783298 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.791134 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-7scs2" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.792951 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.796238 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268"] Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.797137 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.842631 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g5ww\" (UniqueName: \"kubernetes.io/projected/5127cf5c-29a6-484d-9e1c-895e2bb109e3-kube-api-access-4g5ww\") pod \"obo-prometheus-operator-668cf9dfbb-kfn8d\" (UID: \"5127cf5c-29a6-484d-9e1c-895e2bb109e3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.943407 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.943463 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.943602 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g5ww\" (UniqueName: \"kubernetes.io/projected/5127cf5c-29a6-484d-9e1c-895e2bb109e3-kube-api-access-4g5ww\") pod \"obo-prometheus-operator-668cf9dfbb-kfn8d\" (UID: \"5127cf5c-29a6-484d-9e1c-895e2bb109e3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.943740 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.943781 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.964111 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6gzsx"] Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.965003 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.969137 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g5ww\" (UniqueName: \"kubernetes.io/projected/5127cf5c-29a6-484d-9e1c-895e2bb109e3-kube-api-access-4g5ww\") pod \"obo-prometheus-operator-668cf9dfbb-kfn8d\" (UID: \"5127cf5c-29a6-484d-9e1c-895e2bb109e3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.971087 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-wxx6f" Nov 26 22:31:07 crc kubenswrapper[4903]: I1126 22:31:07.971259 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.045984 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlrn2\" (UniqueName: \"kubernetes.io/projected/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-kube-api-access-dlrn2\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.049029 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.049143 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.049221 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.049392 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.049464 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.052438 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.054967 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.083444 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/68d1419a-288f-4fcb-9d4d-8f9568fa2170-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b\" (UID: \"68d1419a-288f-4fcb-9d4d-8f9568fa2170\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.084185 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.084776 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7674d75c-8272-4f53-86fe-3fb83d421c63-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268\" (UID: \"7674d75c-8272-4f53-86fe-3fb83d421c63\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.103761 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(fd8afdde1cb9d9e0ff3cb2bcf675ebaef390cfcc629f5d0448192c35de5a4278): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.103848 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(fd8afdde1cb9d9e0ff3cb2bcf675ebaef390cfcc629f5d0448192c35de5a4278): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.103876 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(fd8afdde1cb9d9e0ff3cb2bcf675ebaef390cfcc629f5d0448192c35de5a4278): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.103925 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(fd8afdde1cb9d9e0ff3cb2bcf675ebaef390cfcc629f5d0448192c35de5a4278): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" podUID="5127cf5c-29a6-484d-9e1c-895e2bb109e3" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.105228 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.108766 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clswb"] Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.109566 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.110082 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.112977 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-r5bq9" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.147005 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(4d75569e3573d4f349191b05c0ed49bfcde3b742eafaea13633fee673facaabb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.147107 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(4d75569e3573d4f349191b05c0ed49bfcde3b742eafaea13633fee673facaabb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.147144 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(4d75569e3573d4f349191b05c0ed49bfcde3b742eafaea13633fee673facaabb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.147223 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(4d75569e3573d4f349191b05c0ed49bfcde3b742eafaea13633fee673facaabb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" podUID="7674d75c-8272-4f53-86fe-3fb83d421c63" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.151457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlrn2\" (UniqueName: \"kubernetes.io/projected/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-kube-api-access-dlrn2\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.151511 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.153899 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(0f819a1b31e6f126ee8bd12bab96cef2c51b6a5e44561453c0db9145969a85c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.154018 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(0f819a1b31e6f126ee8bd12bab96cef2c51b6a5e44561453c0db9145969a85c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.154176 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(0f819a1b31e6f126ee8bd12bab96cef2c51b6a5e44561453c0db9145969a85c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.154409 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(0f819a1b31e6f126ee8bd12bab96cef2c51b6a5e44561453c0db9145969a85c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" podUID="68d1419a-288f-4fcb-9d4d-8f9568fa2170" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.161025 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.173369 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlrn2\" (UniqueName: \"kubernetes.io/projected/c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471-kube-api-access-dlrn2\") pod \"observability-operator-d8bb48f5d-6gzsx\" (UID: \"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471\") " pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.252453 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/193e7d1c-0f98-4013-aad9-16711a00ab2e-openshift-service-ca\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.252495 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqjql\" (UniqueName: \"kubernetes.io/projected/193e7d1c-0f98-4013-aad9-16711a00ab2e-kube-api-access-zqjql\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.299551 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.337513 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5e42bdff10e37de7db9544fd295e9c09019bf7a509d922c46e19c971341872cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.337576 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5e42bdff10e37de7db9544fd295e9c09019bf7a509d922c46e19c971341872cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.337597 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5e42bdff10e37de7db9544fd295e9c09019bf7a509d922c46e19c971341872cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.337636 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5e42bdff10e37de7db9544fd295e9c09019bf7a509d922c46e19c971341872cb): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" podUID="c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.353956 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/193e7d1c-0f98-4013-aad9-16711a00ab2e-openshift-service-ca\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.354006 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqjql\" (UniqueName: \"kubernetes.io/projected/193e7d1c-0f98-4013-aad9-16711a00ab2e-kube-api-access-zqjql\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.354820 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/193e7d1c-0f98-4013-aad9-16711a00ab2e-openshift-service-ca\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.369146 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqjql\" (UniqueName: \"kubernetes.io/projected/193e7d1c-0f98-4013-aad9-16711a00ab2e-kube-api-access-zqjql\") pod \"perses-operator-5446b9c989-clswb\" (UID: \"193e7d1c-0f98-4013-aad9-16711a00ab2e\") " pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.425102 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.450626 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(1c04fbcb363c7c7dd1985b024b01b3b947d4267623874a25d46001cb01e1e0cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.450785 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(1c04fbcb363c7c7dd1985b024b01b3b947d4267623874a25d46001cb01e1e0cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.450858 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(1c04fbcb363c7c7dd1985b024b01b3b947d4267623874a25d46001cb01e1e0cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:08 crc kubenswrapper[4903]: E1126 22:31:08.450951 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(1c04fbcb363c7c7dd1985b024b01b3b947d4267623874a25d46001cb01e1e0cd): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clswb" podUID="193e7d1c-0f98-4013-aad9-16711a00ab2e" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.823634 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" event={"ID":"20840f36-3f5a-425a-b019-9339828a3f27","Type":"ContainerStarted","Data":"4a493bdddab98b31d407252da3e040c04ce470144d4acb66f318791014792f55"} Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.823954 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.823989 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.860790 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:08 crc kubenswrapper[4903]: I1126 22:31:08.939815 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" podStartSLOduration=6.93980079 podStartE2EDuration="6.93980079s" podCreationTimestamp="2025-11-26 22:31:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:31:08.899447615 +0000 UTC m=+597.589682525" watchObservedRunningTime="2025-11-26 22:31:08.93980079 +0000 UTC m=+597.630035700" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.157117 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d"] Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.157220 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.157750 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.160171 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268"] Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.160326 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.160840 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.164120 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6gzsx"] Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.164196 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.164547 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.190782 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clswb"] Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.190887 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.191368 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.210528 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b"] Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.210618 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.211216 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.214227 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5cfc77cccbf505626efc46891e4a00c68db0c7cb0d6474d2f21a0f27548bfd95): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.214285 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5cfc77cccbf505626efc46891e4a00c68db0c7cb0d6474d2f21a0f27548bfd95): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.214310 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5cfc77cccbf505626efc46891e4a00c68db0c7cb0d6474d2f21a0f27548bfd95): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.214357 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(5cfc77cccbf505626efc46891e4a00c68db0c7cb0d6474d2f21a0f27548bfd95): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" podUID="c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.223874 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(9fbf892874e0d67089b30fb87f8779b3c9718e34ef561d873a67dd7520fac27d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.223936 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(9fbf892874e0d67089b30fb87f8779b3c9718e34ef561d873a67dd7520fac27d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.223956 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(9fbf892874e0d67089b30fb87f8779b3c9718e34ef561d873a67dd7520fac27d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.224000 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(9fbf892874e0d67089b30fb87f8779b3c9718e34ef561d873a67dd7520fac27d): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" podUID="5127cf5c-29a6-484d-9e1c-895e2bb109e3" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.229480 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(787b317054c4719bb10455dfe8602deef9ab4efb864f2b9fbee6301c9ef754c2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.229582 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(787b317054c4719bb10455dfe8602deef9ab4efb864f2b9fbee6301c9ef754c2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.230021 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(787b317054c4719bb10455dfe8602deef9ab4efb864f2b9fbee6301c9ef754c2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.230123 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(787b317054c4719bb10455dfe8602deef9ab4efb864f2b9fbee6301c9ef754c2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" podUID="7674d75c-8272-4f53-86fe-3fb83d421c63" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.250866 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(ec634be171dee8eb581d8322ef4523b10fa2fe036c8c1035450b493d071b652d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
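
By this point the same pods have each failed sandbox creation several times, and every retry allocates a fresh sandbox ID. A short parsing sketch (Python, assuming a local copy of this log saved under the hypothetical filename "kubelet.log") tallies the CreatePodSandbox failures per pod, so the retry cadence can be read off rather than scanned by eye:

import re
from collections import Counter

# Matches the kuberuntime_sandbox.go entries above, e.g.
#   "Failed to create sandbox for pod" err="..." pod="openshift-operators/..."
FAILED = re.compile(r'"Failed to create sandbox for pod".*?pod="([^"]+)"')

def failures_per_pod(path: str = "kubelet.log") -> Counter:
    counts: Counter = Counter()
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            m = FAILED.search(line)
            if m:
                counts[m.group(1)] += 1
    return counts

if __name__ == "__main__":
    for pod, n in failures_per_pod().most_common():
        print(f"{n:4d}  {pod}")
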
Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.250935 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(ec634be171dee8eb581d8322ef4523b10fa2fe036c8c1035450b493d071b652d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.250957 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(ec634be171dee8eb581d8322ef4523b10fa2fe036c8c1035450b493d071b652d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.250995 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(ec634be171dee8eb581d8322ef4523b10fa2fe036c8c1035450b493d071b652d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clswb" podUID="193e7d1c-0f98-4013-aad9-16711a00ab2e" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.289550 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(caf2e0170197ac383dbf58f43952702d61ca849f0d3897bac01d8a9dd4cd8ada): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.289614 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(caf2e0170197ac383dbf58f43952702d61ca849f0d3897bac01d8a9dd4cd8ada): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.289634 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(caf2e0170197ac383dbf58f43952702d61ca849f0d3897bac01d8a9dd4cd8ada): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:09 crc kubenswrapper[4903]: E1126 22:31:09.289674 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(caf2e0170197ac383dbf58f43952702d61ca849f0d3897bac01d8a9dd4cd8ada): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" podUID="68d1419a-288f-4fcb-9d4d-8f9568fa2170" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.831860 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:09 crc kubenswrapper[4903]: I1126 22:31:09.880327 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:13 crc kubenswrapper[4903]: I1126 22:31:13.028908 4903 scope.go:117] "RemoveContainer" containerID="a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0" Nov 26 22:31:13 crc kubenswrapper[4903]: E1126 22:31:13.029478 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-bxnsh_openshift-multus(229974d7-7b78-434b-a346-8b9004e69bf2)\"" pod="openshift-multus/multus-bxnsh" podUID="229974d7-7b78-434b-a346-8b9004e69bf2" Nov 26 22:31:21 crc kubenswrapper[4903]: I1126 22:31:21.027889 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:21 crc kubenswrapper[4903]: I1126 22:31:21.028943 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:21 crc kubenswrapper[4903]: E1126 22:31:21.072150 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(38193a69b8642e81007ddf21b979714f4c8c4c70fb4b271c135d8c87e70f99e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:21 crc kubenswrapper[4903]: E1126 22:31:21.072225 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(38193a69b8642e81007ddf21b979714f4c8c4c70fb4b271c135d8c87e70f99e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:21 crc kubenswrapper[4903]: E1126 22:31:21.072256 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(38193a69b8642e81007ddf21b979714f4c8c4c70fb4b271c135d8c87e70f99e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:21 crc kubenswrapper[4903]: E1126 22:31:21.072310 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-6gzsx_openshift-operators(c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-6gzsx_openshift-operators_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471_0(38193a69b8642e81007ddf21b979714f4c8c4c70fb4b271c135d8c87e70f99e4): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" podUID="c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471" Nov 26 22:31:22 crc kubenswrapper[4903]: I1126 22:31:22.028320 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:22 crc kubenswrapper[4903]: I1126 22:31:22.028335 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:22 crc kubenswrapper[4903]: I1126 22:31:22.031498 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:22 crc kubenswrapper[4903]: I1126 22:31:22.031637 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.059637 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(ed85bf482a193da5dd40c512d9edad6fe32d19c1bbd83ae3b727d8bdb15f21ae): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.059716 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(ed85bf482a193da5dd40c512d9edad6fe32d19c1bbd83ae3b727d8bdb15f21ae): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.059751 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(ed85bf482a193da5dd40c512d9edad6fe32d19c1bbd83ae3b727d8bdb15f21ae): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.059797 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators(5127cf5c-29a6-484d-9e1c-895e2bb109e3)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-kfn8d_openshift-operators_5127cf5c-29a6-484d-9e1c-895e2bb109e3_0(ed85bf482a193da5dd40c512d9edad6fe32d19c1bbd83ae3b727d8bdb15f21ae): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" podUID="5127cf5c-29a6-484d-9e1c-895e2bb109e3" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.063535 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(7297c394095756477042c5999c94c8ac33e6fbd819a047d19ee12df1132d8309): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.063602 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(7297c394095756477042c5999c94c8ac33e6fbd819a047d19ee12df1132d8309): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.063627 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(7297c394095756477042c5999c94c8ac33e6fbd819a047d19ee12df1132d8309): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:22 crc kubenswrapper[4903]: E1126 22:31:22.063671 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators(7674d75c-8272-4f53-86fe-3fb83d421c63)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_openshift-operators_7674d75c-8272-4f53-86fe-3fb83d421c63_0(7297c394095756477042c5999c94c8ac33e6fbd819a047d19ee12df1132d8309): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" podUID="7674d75c-8272-4f53-86fe-3fb83d421c63" Nov 26 22:31:23 crc kubenswrapper[4903]: I1126 22:31:23.027618 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:23 crc kubenswrapper[4903]: I1126 22:31:23.028680 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:23 crc kubenswrapper[4903]: E1126 22:31:23.066759 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(0493a4dd917359d3ea5871c6f52564adb37014dcf73cde67d8a963504070fee6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:23 crc kubenswrapper[4903]: E1126 22:31:23.066857 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(0493a4dd917359d3ea5871c6f52564adb37014dcf73cde67d8a963504070fee6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:23 crc kubenswrapper[4903]: E1126 22:31:23.066898 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(0493a4dd917359d3ea5871c6f52564adb37014dcf73cde67d8a963504070fee6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:23 crc kubenswrapper[4903]: E1126 22:31:23.066974 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-clswb_openshift-operators(193e7d1c-0f98-4013-aad9-16711a00ab2e)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-clswb_openshift-operators_193e7d1c-0f98-4013-aad9-16711a00ab2e_0(0493a4dd917359d3ea5871c6f52564adb37014dcf73cde67d8a963504070fee6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-clswb" podUID="193e7d1c-0f98-4013-aad9-16711a00ab2e" Nov 26 22:31:24 crc kubenswrapper[4903]: I1126 22:31:24.045846 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:24 crc kubenswrapper[4903]: I1126 22:31:24.046325 4903 scope.go:117] "RemoveContainer" containerID="a7e8cfe57c3a57c637ffddf064cb78b7f997c1fa34e6aeee992af477cee52eb0" Nov 26 22:31:24 crc kubenswrapper[4903]: I1126 22:31:24.046374 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:24 crc kubenswrapper[4903]: E1126 22:31:24.094566 4903 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(d917ea08c008eacf3cff5195bd30cd42cbd62c8577a5e0517407f01aec887352): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 22:31:24 crc kubenswrapper[4903]: E1126 22:31:24.094645 4903 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(d917ea08c008eacf3cff5195bd30cd42cbd62c8577a5e0517407f01aec887352): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:24 crc kubenswrapper[4903]: E1126 22:31:24.094674 4903 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(d917ea08c008eacf3cff5195bd30cd42cbd62c8577a5e0517407f01aec887352): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:24 crc kubenswrapper[4903]: E1126 22:31:24.094756 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators(68d1419a-288f-4fcb-9d4d-8f9568fa2170)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_openshift-operators_68d1419a-288f-4fcb-9d4d-8f9568fa2170_0(d917ea08c008eacf3cff5195bd30cd42cbd62c8577a5e0517407f01aec887352): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" podUID="68d1419a-288f-4fcb-9d4d-8f9568fa2170" Nov 26 22:31:24 crc kubenswrapper[4903]: I1126 22:31:24.928929 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bxnsh_229974d7-7b78-434b-a346-8b9004e69bf2/kube-multus/2.log" Nov 26 22:31:24 crc kubenswrapper[4903]: I1126 22:31:24.929222 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bxnsh" event={"ID":"229974d7-7b78-434b-a346-8b9004e69bf2","Type":"ContainerStarted","Data":"ee2ad6feacc73c01c493b2e039f0639781a2a8c008ed2f6ad20dfd162c859f44"} Nov 26 22:31:31 crc kubenswrapper[4903]: I1126 22:31:31.980832 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:31:31 crc kubenswrapper[4903]: I1126 22:31:31.981775 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:31:32 crc kubenswrapper[4903]: I1126 22:31:32.549847 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wb7c" Nov 26 22:31:33 crc kubenswrapper[4903]: I1126 22:31:33.028033 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:33 crc kubenswrapper[4903]: I1126 22:31:33.028635 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" Nov 26 22:31:33 crc kubenswrapper[4903]: I1126 22:31:33.329076 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d"] Nov 26 22:31:33 crc kubenswrapper[4903]: I1126 22:31:33.990311 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" event={"ID":"5127cf5c-29a6-484d-9e1c-895e2bb109e3","Type":"ContainerStarted","Data":"b3b49f8aa3c3591d2aaeb58914f3952a2058bb72bcd7f3cfeb9358cbb05cf194"} Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.027775 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.027850 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.027864 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.028368 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.028425 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.028628 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.523499 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268"] Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.529207 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6gzsx"] Nov 26 22:31:35 crc kubenswrapper[4903]: I1126 22:31:35.532831 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-clswb"] Nov 26 22:31:35 crc kubenswrapper[4903]: W1126 22:31:35.541663 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7674d75c_8272_4f53_86fe_3fb83d421c63.slice/crio-2ebc28207357860924b9f8c3089cd7335014ef17a9fda0cb5e2342f588e7ed26 WatchSource:0}: Error finding container 2ebc28207357860924b9f8c3089cd7335014ef17a9fda0cb5e2342f588e7ed26: Status 404 returned error can't find the container with id 2ebc28207357860924b9f8c3089cd7335014ef17a9fda0cb5e2342f588e7ed26 Nov 26 22:31:35 crc kubenswrapper[4903]: W1126 22:31:35.543598 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod193e7d1c_0f98_4013_aad9_16711a00ab2e.slice/crio-0d36bcdb1dbce22ed292f2fc8cd6f8a54c0c2097abc19fe823521fbd8c94d704 WatchSource:0}: Error finding container 0d36bcdb1dbce22ed292f2fc8cd6f8a54c0c2097abc19fe823521fbd8c94d704: Status 404 returned error can't find the container with id 0d36bcdb1dbce22ed292f2fc8cd6f8a54c0c2097abc19fe823521fbd8c94d704 Nov 26 22:31:35 crc kubenswrapper[4903]: W1126 22:31:35.549445 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3c5c9e5_d4c4_4fcd_8c0c_dee6f7ce6471.slice/crio-51bee8e5a75e2b85f44a222b4df0aa9cff8bbcbab256523dbc9d7ce86654beae WatchSource:0}: Error finding container 51bee8e5a75e2b85f44a222b4df0aa9cff8bbcbab256523dbc9d7ce86654beae: Status 404 returned error can't find the container with id 51bee8e5a75e2b85f44a222b4df0aa9cff8bbcbab256523dbc9d7ce86654beae Nov 26 22:31:36 crc kubenswrapper[4903]: I1126 22:31:36.006657 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" 
event={"ID":"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471","Type":"ContainerStarted","Data":"51bee8e5a75e2b85f44a222b4df0aa9cff8bbcbab256523dbc9d7ce86654beae"} Nov 26 22:31:36 crc kubenswrapper[4903]: I1126 22:31:36.008147 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" event={"ID":"7674d75c-8272-4f53-86fe-3fb83d421c63","Type":"ContainerStarted","Data":"2ebc28207357860924b9f8c3089cd7335014ef17a9fda0cb5e2342f588e7ed26"} Nov 26 22:31:36 crc kubenswrapper[4903]: I1126 22:31:36.009304 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-clswb" event={"ID":"193e7d1c-0f98-4013-aad9-16711a00ab2e","Type":"ContainerStarted","Data":"0d36bcdb1dbce22ed292f2fc8cd6f8a54c0c2097abc19fe823521fbd8c94d704"} Nov 26 22:31:38 crc kubenswrapper[4903]: I1126 22:31:38.028552 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:38 crc kubenswrapper[4903]: I1126 22:31:38.030018 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" Nov 26 22:31:41 crc kubenswrapper[4903]: I1126 22:31:41.122530 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b"] Nov 26 22:31:41 crc kubenswrapper[4903]: W1126 22:31:41.141402 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68d1419a_288f_4fcb_9d4d_8f9568fa2170.slice/crio-e9c48539047331ee9dc44574d00abdc81d1132d2885b601c0ac43e30d4659763 WatchSource:0}: Error finding container e9c48539047331ee9dc44574d00abdc81d1132d2885b601c0ac43e30d4659763: Status 404 returned error can't find the container with id e9c48539047331ee9dc44574d00abdc81d1132d2885b601c0ac43e30d4659763 Nov 26 22:31:42 crc kubenswrapper[4903]: I1126 22:31:42.064210 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" event={"ID":"5127cf5c-29a6-484d-9e1c-895e2bb109e3","Type":"ContainerStarted","Data":"612c3762a31fa89b74e3944d17c44737de0bbbf37959c7eb1e68cd293df22176"} Nov 26 22:31:42 crc kubenswrapper[4903]: I1126 22:31:42.067023 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" event={"ID":"68d1419a-288f-4fcb-9d4d-8f9568fa2170","Type":"ContainerStarted","Data":"e9c48539047331ee9dc44574d00abdc81d1132d2885b601c0ac43e30d4659763"} Nov 26 22:31:42 crc kubenswrapper[4903]: I1126 22:31:42.119610 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-kfn8d" podStartSLOduration=27.686529023 podStartE2EDuration="35.119591207s" podCreationTimestamp="2025-11-26 22:31:07 +0000 UTC" firstStartedPulling="2025-11-26 22:31:33.335512858 +0000 UTC m=+622.025747768" lastFinishedPulling="2025-11-26 22:31:40.768575032 +0000 UTC m=+629.458809952" observedRunningTime="2025-11-26 22:31:42.116044262 +0000 UTC m=+630.806279172" watchObservedRunningTime="2025-11-26 22:31:42.119591207 +0000 UTC m=+630.809826127" Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.075609 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-clswb" 
event={"ID":"193e7d1c-0f98-4013-aad9-16711a00ab2e","Type":"ContainerStarted","Data":"b3f93d2efbfdb5ff4be3aff77de1ff5ef4e8c46839aeae1421dbb23f38598644"} Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.082276 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" event={"ID":"7674d75c-8272-4f53-86fe-3fb83d421c63","Type":"ContainerStarted","Data":"8286dfd56fe50b381b1152689a8956d6e81cf3e0b21fe65a47e7ebf76905843c"} Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.096482 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-clswb" podStartSLOduration=28.130471778 podStartE2EDuration="35.096468824s" podCreationTimestamp="2025-11-26 22:31:08 +0000 UTC" firstStartedPulling="2025-11-26 22:31:35.546642513 +0000 UTC m=+624.236877423" lastFinishedPulling="2025-11-26 22:31:42.512639559 +0000 UTC m=+631.202874469" observedRunningTime="2025-11-26 22:31:43.095521518 +0000 UTC m=+631.785756448" watchObservedRunningTime="2025-11-26 22:31:43.096468824 +0000 UTC m=+631.786703734" Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.101383 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" event={"ID":"68d1419a-288f-4fcb-9d4d-8f9568fa2170","Type":"ContainerStarted","Data":"d03c9d80039acf11adffbeb3549c1cbe5d23dee2c16401fc4d07f909d7f1d854"} Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.130531 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-jw268" podStartSLOduration=29.183089996 podStartE2EDuration="36.130514188s" podCreationTimestamp="2025-11-26 22:31:07 +0000 UTC" firstStartedPulling="2025-11-26 22:31:35.546940211 +0000 UTC m=+624.237175111" lastFinishedPulling="2025-11-26 22:31:42.494364393 +0000 UTC m=+631.184599303" observedRunningTime="2025-11-26 22:31:43.113200828 +0000 UTC m=+631.803435738" watchObservedRunningTime="2025-11-26 22:31:43.130514188 +0000 UTC m=+631.820749098" Nov 26 22:31:43 crc kubenswrapper[4903]: I1126 22:31:43.130760 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b" podStartSLOduration=34.771708612 podStartE2EDuration="36.130757384s" podCreationTimestamp="2025-11-26 22:31:07 +0000 UTC" firstStartedPulling="2025-11-26 22:31:41.154456739 +0000 UTC m=+629.844691659" lastFinishedPulling="2025-11-26 22:31:42.513505521 +0000 UTC m=+631.203740431" observedRunningTime="2025-11-26 22:31:43.12876667 +0000 UTC m=+631.819001590" watchObservedRunningTime="2025-11-26 22:31:43.130757384 +0000 UTC m=+631.820992294" Nov 26 22:31:44 crc kubenswrapper[4903]: I1126 22:31:44.109212 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:46 crc kubenswrapper[4903]: I1126 22:31:46.125950 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" event={"ID":"c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471","Type":"ContainerStarted","Data":"7a3ba1c96619ba7cb4cda045b0bd80dcf2c5b8dca79cf9767f1501d7ac88beff"} Nov 26 22:31:46 crc kubenswrapper[4903]: I1126 22:31:46.126230 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 
26 22:31:46 crc kubenswrapper[4903]: I1126 22:31:46.128087 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" Nov 26 22:31:46 crc kubenswrapper[4903]: I1126 22:31:46.161984 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-6gzsx" podStartSLOduration=29.568293973 podStartE2EDuration="39.161961161s" podCreationTimestamp="2025-11-26 22:31:07 +0000 UTC" firstStartedPulling="2025-11-26 22:31:35.557284871 +0000 UTC m=+624.247519811" lastFinishedPulling="2025-11-26 22:31:45.150952049 +0000 UTC m=+633.841186999" observedRunningTime="2025-11-26 22:31:46.15452558 +0000 UTC m=+634.844760530" watchObservedRunningTime="2025-11-26 22:31:46.161961161 +0000 UTC m=+634.852196101" Nov 26 22:31:48 crc kubenswrapper[4903]: I1126 22:31:48.429865 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-clswb" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.170250 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dfvzf"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.171537 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.177138 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-qq9fm" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.177248 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.177362 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.183878 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dfvzf"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.195378 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-qdk8j"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.196259 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.200217 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vtsfk" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.221247 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zz98c"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.222126 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.225989 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7stmr" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.242428 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zz98c"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.260109 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-qdk8j"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.359822 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn4zd\" (UniqueName: \"kubernetes.io/projected/de11c064-60b1-4f96-a316-bc903f061766-kube-api-access-sn4zd\") pod \"cert-manager-5b446d88c5-qdk8j\" (UID: \"de11c064-60b1-4f96-a316-bc903f061766\") " pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.359887 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xdvv\" (UniqueName: \"kubernetes.io/projected/da38aa1b-878d-476e-b742-7329a813bf99-kube-api-access-8xdvv\") pod \"cert-manager-webhook-5655c58dd6-zz98c\" (UID: \"da38aa1b-878d-476e-b742-7329a813bf99\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.359941 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz5d6\" (UniqueName: \"kubernetes.io/projected/580a58c8-ce17-4d85-991a-e51d3eb639b3-kube-api-access-nz5d6\") pod \"cert-manager-cainjector-7f985d654d-dfvzf\" (UID: \"580a58c8-ce17-4d85-991a-e51d3eb639b3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.460798 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn4zd\" (UniqueName: \"kubernetes.io/projected/de11c064-60b1-4f96-a316-bc903f061766-kube-api-access-sn4zd\") pod \"cert-manager-5b446d88c5-qdk8j\" (UID: \"de11c064-60b1-4f96-a316-bc903f061766\") " pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.460858 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xdvv\" (UniqueName: \"kubernetes.io/projected/da38aa1b-878d-476e-b742-7329a813bf99-kube-api-access-8xdvv\") pod \"cert-manager-webhook-5655c58dd6-zz98c\" (UID: \"da38aa1b-878d-476e-b742-7329a813bf99\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.460905 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz5d6\" (UniqueName: \"kubernetes.io/projected/580a58c8-ce17-4d85-991a-e51d3eb639b3-kube-api-access-nz5d6\") pod \"cert-manager-cainjector-7f985d654d-dfvzf\" (UID: \"580a58c8-ce17-4d85-991a-e51d3eb639b3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.481437 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn4zd\" (UniqueName: \"kubernetes.io/projected/de11c064-60b1-4f96-a316-bc903f061766-kube-api-access-sn4zd\") pod \"cert-manager-5b446d88c5-qdk8j\" (UID: \"de11c064-60b1-4f96-a316-bc903f061766\") " 
pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.481465 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xdvv\" (UniqueName: \"kubernetes.io/projected/da38aa1b-878d-476e-b742-7329a813bf99-kube-api-access-8xdvv\") pod \"cert-manager-webhook-5655c58dd6-zz98c\" (UID: \"da38aa1b-878d-476e-b742-7329a813bf99\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.499917 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz5d6\" (UniqueName: \"kubernetes.io/projected/580a58c8-ce17-4d85-991a-e51d3eb639b3-kube-api-access-nz5d6\") pod \"cert-manager-cainjector-7f985d654d-dfvzf\" (UID: \"580a58c8-ce17-4d85-991a-e51d3eb639b3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.515476 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.541847 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.789115 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.825061 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-qdk8j"] Nov 26 22:31:53 crc kubenswrapper[4903]: I1126 22:31:53.876597 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-zz98c"] Nov 26 22:31:54 crc kubenswrapper[4903]: I1126 22:31:54.182204 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" event={"ID":"da38aa1b-878d-476e-b742-7329a813bf99","Type":"ContainerStarted","Data":"700429b278c8f9cfe77224b7c23ecc8aef5680051ebfefc5457b4c68ef9e038b"} Nov 26 22:31:54 crc kubenswrapper[4903]: I1126 22:31:54.183762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" event={"ID":"de11c064-60b1-4f96-a316-bc903f061766","Type":"ContainerStarted","Data":"bf3fabcc01efe21e0c3e38cb3dffca7da2ad6433e83022a9b603004bc1dd252e"} Nov 26 22:31:54 crc kubenswrapper[4903]: I1126 22:31:54.454319 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-dfvzf"] Nov 26 22:31:54 crc kubenswrapper[4903]: W1126 22:31:54.456142 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod580a58c8_ce17_4d85_991a_e51d3eb639b3.slice/crio-43bf5edd9bd2e4ccddce1ddb56bc01f2912ad3275e366fc229f6cdabafafd269 WatchSource:0}: Error finding container 43bf5edd9bd2e4ccddce1ddb56bc01f2912ad3275e366fc229f6cdabafafd269: Status 404 returned error can't find the container with id 43bf5edd9bd2e4ccddce1ddb56bc01f2912ad3275e366fc229f6cdabafafd269 Nov 26 22:31:55 crc kubenswrapper[4903]: I1126 22:31:55.197757 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" event={"ID":"580a58c8-ce17-4d85-991a-e51d3eb639b3","Type":"ContainerStarted","Data":"43bf5edd9bd2e4ccddce1ddb56bc01f2912ad3275e366fc229f6cdabafafd269"} Nov 26 22:31:58 crc kubenswrapper[4903]: 
I1126 22:31:58.224606 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" event={"ID":"da38aa1b-878d-476e-b742-7329a813bf99","Type":"ContainerStarted","Data":"04b36f0d3b8afa06bd8e02e00fe41ec260f95149c7eafe1c5908429534c08639"} Nov 26 22:31:58 crc kubenswrapper[4903]: I1126 22:31:58.225038 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:31:58 crc kubenswrapper[4903]: I1126 22:31:58.226316 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" event={"ID":"de11c064-60b1-4f96-a316-bc903f061766","Type":"ContainerStarted","Data":"7021f26343b9006bc74dda37289ee15e2590e70ff5c00d899c335ede8e43dfab"} Nov 26 22:31:58 crc kubenswrapper[4903]: I1126 22:31:58.240733 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" podStartSLOduration=2.119987018 podStartE2EDuration="5.240713255s" podCreationTimestamp="2025-11-26 22:31:53 +0000 UTC" firstStartedPulling="2025-11-26 22:31:53.921068519 +0000 UTC m=+642.611303439" lastFinishedPulling="2025-11-26 22:31:57.041794736 +0000 UTC m=+645.732029676" observedRunningTime="2025-11-26 22:31:58.237473137 +0000 UTC m=+646.927708057" watchObservedRunningTime="2025-11-26 22:31:58.240713255 +0000 UTC m=+646.930948185" Nov 26 22:31:58 crc kubenswrapper[4903]: I1126 22:31:58.264640 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" podStartSLOduration=2.020454889 podStartE2EDuration="5.264612063s" podCreationTimestamp="2025-11-26 22:31:53 +0000 UTC" firstStartedPulling="2025-11-26 22:31:53.850886536 +0000 UTC m=+642.541121446" lastFinishedPulling="2025-11-26 22:31:57.09504367 +0000 UTC m=+645.785278620" observedRunningTime="2025-11-26 22:31:58.25453666 +0000 UTC m=+646.944771560" watchObservedRunningTime="2025-11-26 22:31:58.264612063 +0000 UTC m=+646.954847013" Nov 26 22:31:59 crc kubenswrapper[4903]: I1126 22:31:59.236290 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" event={"ID":"580a58c8-ce17-4d85-991a-e51d3eb639b3","Type":"ContainerStarted","Data":"3426b2d0e3f7e8a4e497ee7604da437b01133a3ef07a12e33c2e751479b1b5db"} Nov 26 22:31:59 crc kubenswrapper[4903]: I1126 22:31:59.257271 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" podStartSLOduration=2.406139591 podStartE2EDuration="6.257244297s" podCreationTimestamp="2025-11-26 22:31:53 +0000 UTC" firstStartedPulling="2025-11-26 22:31:54.458204559 +0000 UTC m=+643.148439469" lastFinishedPulling="2025-11-26 22:31:58.309309265 +0000 UTC m=+646.999544175" observedRunningTime="2025-11-26 22:31:59.256660492 +0000 UTC m=+647.946895432" watchObservedRunningTime="2025-11-26 22:31:59.257244297 +0000 UTC m=+647.947479237" Nov 26 22:32:01 crc kubenswrapper[4903]: I1126 22:32:01.980974 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:32:01 crc kubenswrapper[4903]: I1126 22:32:01.981360 4903 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:32:01 crc kubenswrapper[4903]: I1126 22:32:01.981422 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:32:01 crc kubenswrapper[4903]: I1126 22:32:01.982304 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:32:01 crc kubenswrapper[4903]: I1126 22:32:01.982403 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4" gracePeriod=600 Nov 26 22:32:02 crc kubenswrapper[4903]: I1126 22:32:02.268401 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4" exitCode=0 Nov 26 22:32:02 crc kubenswrapper[4903]: I1126 22:32:02.268830 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4"} Nov 26 22:32:02 crc kubenswrapper[4903]: I1126 22:32:02.268881 4903 scope.go:117] "RemoveContainer" containerID="96e687a4eac5ec0d09c0b75e4590018ddcce7bd80d552c8e11b1f99591cbaa37" Nov 26 22:32:03 crc kubenswrapper[4903]: I1126 22:32:03.281160 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1"} Nov 26 22:32:03 crc kubenswrapper[4903]: I1126 22:32:03.545996 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-zz98c" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.626603 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75"] Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.628549 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.630930 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.649166 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75"] Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.742019 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.742074 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.742099 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sc49\" (UniqueName: \"kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.821582 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z"] Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.823829 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.837369 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z"] Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.843335 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.843395 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.843437 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sc49\" (UniqueName: \"kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.843924 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.843950 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.880856 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sc49\" (UniqueName: \"kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.943151 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.944430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.944519 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:26 crc kubenswrapper[4903]: I1126 22:32:26.944562 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c8db\" (UniqueName: \"kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.046163 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.046239 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.046279 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c8db\" (UniqueName: \"kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.047369 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.047656 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" 
(UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.080101 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c8db\" (UniqueName: \"kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.141314 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.394133 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z"] Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.407309 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75"] Nov 26 22:32:27 crc kubenswrapper[4903]: W1126 22:32:27.422168 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff6ba428_63b4_4a8d_9b52_9d7dd77d0430.slice/crio-9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a WatchSource:0}: Error finding container 9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a: Status 404 returned error can't find the container with id 9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.491845 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" event={"ID":"ed8d8216-366b-44a5-b2fd-0b3ad381efc9","Type":"ContainerStarted","Data":"1d3c3ed1b60c57c97b94b2b6623c611cc8649d66dcefbac30f7323b8a8aa3528"} Nov 26 22:32:27 crc kubenswrapper[4903]: I1126 22:32:27.493353 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" event={"ID":"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430","Type":"ContainerStarted","Data":"9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a"} Nov 26 22:32:28 crc kubenswrapper[4903]: I1126 22:32:28.504588 4903 generic.go:334] "Generic (PLEG): container finished" podID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerID="696912505f8d849ed2a31c91945ab05bb911148edc90066683189fa01afa2cdb" exitCode=0 Nov 26 22:32:28 crc kubenswrapper[4903]: I1126 22:32:28.504651 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" event={"ID":"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430","Type":"ContainerDied","Data":"696912505f8d849ed2a31c91945ab05bb911148edc90066683189fa01afa2cdb"} Nov 26 22:32:28 crc kubenswrapper[4903]: I1126 22:32:28.509044 4903 generic.go:334] "Generic (PLEG): container finished" podID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerID="b2af382aff0a9fcbbcfae4fbf8f70f83c976b89faba3ea4038befcb589b38fe3" 
exitCode=0 Nov 26 22:32:28 crc kubenswrapper[4903]: I1126 22:32:28.509085 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" event={"ID":"ed8d8216-366b-44a5-b2fd-0b3ad381efc9","Type":"ContainerDied","Data":"b2af382aff0a9fcbbcfae4fbf8f70f83c976b89faba3ea4038befcb589b38fe3"} Nov 26 22:32:30 crc kubenswrapper[4903]: I1126 22:32:30.528565 4903 generic.go:334] "Generic (PLEG): container finished" podID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerID="4de40038fa26bed991099d9b091e20cf34ceda3cc99fc37c4617d430d9bfb656" exitCode=0 Nov 26 22:32:30 crc kubenswrapper[4903]: I1126 22:32:30.528678 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" event={"ID":"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430","Type":"ContainerDied","Data":"4de40038fa26bed991099d9b091e20cf34ceda3cc99fc37c4617d430d9bfb656"} Nov 26 22:32:30 crc kubenswrapper[4903]: I1126 22:32:30.536045 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" event={"ID":"ed8d8216-366b-44a5-b2fd-0b3ad381efc9","Type":"ContainerDied","Data":"af3d12eecf57b80f990e52c6fb18fb78d7d53555d47611e0293218cba6cbedb4"} Nov 26 22:32:30 crc kubenswrapper[4903]: I1126 22:32:30.536618 4903 generic.go:334] "Generic (PLEG): container finished" podID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerID="af3d12eecf57b80f990e52c6fb18fb78d7d53555d47611e0293218cba6cbedb4" exitCode=0 Nov 26 22:32:31 crc kubenswrapper[4903]: I1126 22:32:31.549864 4903 generic.go:334] "Generic (PLEG): container finished" podID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerID="82ed06103474305e1a0b9ac4a173e7063a654c61339bbc1ae6394847ac84d4ae" exitCode=0 Nov 26 22:32:31 crc kubenswrapper[4903]: I1126 22:32:31.549991 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" event={"ID":"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430","Type":"ContainerDied","Data":"82ed06103474305e1a0b9ac4a173e7063a654c61339bbc1ae6394847ac84d4ae"} Nov 26 22:32:31 crc kubenswrapper[4903]: I1126 22:32:31.553417 4903 generic.go:334] "Generic (PLEG): container finished" podID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerID="5aa25d79817853cae60c9834fdd486b0d5692e6a0dd466d106e222cf91d89f66" exitCode=0 Nov 26 22:32:31 crc kubenswrapper[4903]: I1126 22:32:31.553462 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" event={"ID":"ed8d8216-366b-44a5-b2fd-0b3ad381efc9","Type":"ContainerDied","Data":"5aa25d79817853cae60c9834fdd486b0d5692e6a0dd466d106e222cf91d89f66"} Nov 26 22:32:32 crc kubenswrapper[4903]: I1126 22:32:32.895781 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.011888 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.039553 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle\") pod \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.039658 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sc49\" (UniqueName: \"kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49\") pod \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.039806 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util\") pod \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\" (UID: \"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.040390 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle" (OuterVolumeSpecName: "bundle") pod "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" (UID: "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.044470 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49" (OuterVolumeSpecName: "kube-api-access-7sc49") pod "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" (UID: "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430"). InnerVolumeSpecName "kube-api-access-7sc49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.140855 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c8db\" (UniqueName: \"kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db\") pod \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.140980 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util\") pod \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.141098 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle\") pod \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\" (UID: \"ed8d8216-366b-44a5-b2fd-0b3ad381efc9\") " Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.143624 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle" (OuterVolumeSpecName: "bundle") pod "ed8d8216-366b-44a5-b2fd-0b3ad381efc9" (UID: "ed8d8216-366b-44a5-b2fd-0b3ad381efc9"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.144130 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.144199 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sc49\" (UniqueName: \"kubernetes.io/projected/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-kube-api-access-7sc49\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.146377 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db" (OuterVolumeSpecName: "kube-api-access-9c8db") pod "ed8d8216-366b-44a5-b2fd-0b3ad381efc9" (UID: "ed8d8216-366b-44a5-b2fd-0b3ad381efc9"). InnerVolumeSpecName "kube-api-access-9c8db". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.245925 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c8db\" (UniqueName: \"kubernetes.io/projected/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-kube-api-access-9c8db\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.245990 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.585291 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" event={"ID":"ff6ba428-63b4-4a8d-9b52-9d7dd77d0430","Type":"ContainerDied","Data":"9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a"} Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.585663 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9dced2cb2b8ff737ac891200cb76f7e7a998be354bc7a1febe97ba66cf998d0a" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.585305 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.587896 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" event={"ID":"ed8d8216-366b-44a5-b2fd-0b3ad381efc9","Type":"ContainerDied","Data":"1d3c3ed1b60c57c97b94b2b6623c611cc8649d66dcefbac30f7323b8a8aa3528"} Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.587937 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d3c3ed1b60c57c97b94b2b6623c611cc8649d66dcefbac30f7323b8a8aa3528" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.588192 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.734620 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util" (OuterVolumeSpecName: "util") pod "ed8d8216-366b-44a5-b2fd-0b3ad381efc9" (UID: "ed8d8216-366b-44a5-b2fd-0b3ad381efc9"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.753584 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ed8d8216-366b-44a5-b2fd-0b3ad381efc9-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.781566 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util" (OuterVolumeSpecName: "util") pod "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" (UID: "ff6ba428-63b4-4a8d-9b52-9d7dd77d0430"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:32:33 crc kubenswrapper[4903]: I1126 22:32:33.855463 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ff6ba428-63b4-4a8d-9b52-9d7dd77d0430-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.467988 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll"] Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.468617 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.468629 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.468651 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="pull" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.468657 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="pull" Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.468665 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="pull" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.468672 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="pull" Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.468689 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="util" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.468694 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="util" Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.469693 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.469719 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: E1126 22:32:44.469727 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="util" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.469732 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="util" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.469843 4903 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ff6ba428-63b4-4a8d-9b52-9d7dd77d0430" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.469859 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed8d8216-366b-44a5-b2fd-0b3ad381efc9" containerName="extract" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.470480 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.473301 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.473358 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.473438 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.473654 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.473711 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.474409 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-t9227" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.482886 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll"] Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.655042 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-webhook-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.655151 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-manager-config\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.655171 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.655292 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cncmr\" (UniqueName: 
\"kubernetes.io/projected/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-kube-api-access-cncmr\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.655389 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-apiservice-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.756339 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-apiservice-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.756409 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-webhook-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.756464 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-manager-config\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.756483 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.756501 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cncmr\" (UniqueName: \"kubernetes.io/projected/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-kube-api-access-cncmr\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.757828 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-manager-config\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.765346 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-apiservice-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.765433 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.765478 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-webhook-cert\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.772888 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cncmr\" (UniqueName: \"kubernetes.io/projected/a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b-kube-api-access-cncmr\") pod \"loki-operator-controller-manager-5c85bfb685-pwxll\" (UID: \"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b\") " pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:44 crc kubenswrapper[4903]: I1126 22:32:44.786080 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:45 crc kubenswrapper[4903]: I1126 22:32:45.006696 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll"] Nov 26 22:32:45 crc kubenswrapper[4903]: I1126 22:32:45.666226 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" event={"ID":"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b","Type":"ContainerStarted","Data":"9404f33024d3f09e625e4826726c5827a601d1fa7375a5e00be8feda3c424bc2"} Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.327396 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xslxr"] Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.328277 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.331126 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.331300 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-kf6h9" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.331931 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.345195 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xslxr"] Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.480334 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szmmw\" (UniqueName: \"kubernetes.io/projected/a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354-kube-api-access-szmmw\") pod \"cluster-logging-operator-ff9846bd-xslxr\" (UID: \"a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.582393 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szmmw\" (UniqueName: \"kubernetes.io/projected/a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354-kube-api-access-szmmw\") pod \"cluster-logging-operator-ff9846bd-xslxr\" (UID: \"a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.605103 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szmmw\" (UniqueName: \"kubernetes.io/projected/a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354-kube-api-access-szmmw\") pod \"cluster-logging-operator-ff9846bd-xslxr\" (UID: \"a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" Nov 26 22:32:46 crc kubenswrapper[4903]: I1126 22:32:46.650858 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" Nov 26 22:32:47 crc kubenswrapper[4903]: I1126 22:32:47.132391 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xslxr"] Nov 26 22:32:47 crc kubenswrapper[4903]: I1126 22:32:47.681179 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" event={"ID":"a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354","Type":"ContainerStarted","Data":"acec04a6d9253cc832dc472a12be4d2a51332d70384c033bfbec0d452386c839"} Nov 26 22:32:50 crc kubenswrapper[4903]: I1126 22:32:50.714665 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" event={"ID":"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b","Type":"ContainerStarted","Data":"4db164b02efe5a55e76b3ecf414cfdacfb7e242134eada8ffa7395d466deb281"} Nov 26 22:32:57 crc kubenswrapper[4903]: I1126 22:32:57.775325 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" event={"ID":"a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354","Type":"ContainerStarted","Data":"d433ea5c14fbfe09a490aa54fb3a88ff64e16ff4c6987975dcdd84ec616bd8c5"} Nov 26 22:32:57 crc kubenswrapper[4903]: I1126 22:32:57.782191 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" event={"ID":"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b","Type":"ContainerStarted","Data":"9dd5c7cae9ebe59615ec97f0175c9f1b6477c34e5f1d96302a42fb0c0fb47af2"} Nov 26 22:32:57 crc kubenswrapper[4903]: I1126 22:32:57.782539 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:57 crc kubenswrapper[4903]: I1126 22:32:57.788141 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:32:57 crc kubenswrapper[4903]: I1126 22:32:57.808362 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-xslxr" podStartSLOduration=2.090010324 podStartE2EDuration="11.808338784s" podCreationTimestamp="2025-11-26 22:32:46 +0000 UTC" firstStartedPulling="2025-11-26 22:32:47.17590605 +0000 UTC m=+695.866140960" lastFinishedPulling="2025-11-26 22:32:56.89423451 +0000 UTC m=+705.584469420" observedRunningTime="2025-11-26 22:32:57.8026624 +0000 UTC m=+706.492897320" watchObservedRunningTime="2025-11-26 22:32:57.808338784 +0000 UTC m=+706.498573704" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.280592 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" podStartSLOduration=7.388938945 podStartE2EDuration="19.280573934s" podCreationTimestamp="2025-11-26 22:32:44 +0000 UTC" firstStartedPulling="2025-11-26 22:32:45.029871851 +0000 UTC m=+693.720106761" lastFinishedPulling="2025-11-26 22:32:56.92150684 +0000 UTC m=+705.611741750" observedRunningTime="2025-11-26 22:32:57.839866109 +0000 UTC m=+706.530101029" watchObservedRunningTime="2025-11-26 22:33:03.280573934 +0000 UTC m=+711.970808844" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.283334 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Nov 26 22:33:03 crc 
kubenswrapper[4903]: I1126 22:33:03.284203 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.287075 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.287226 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.294867 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.450043 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.450101 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfdrg\" (UniqueName: \"kubernetes.io/projected/7ae1f53c-b5fb-43db-b752-97d84be5d3c5-kube-api-access-lfdrg\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.551608 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.552020 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfdrg\" (UniqueName: \"kubernetes.io/projected/7ae1f53c-b5fb-43db-b752-97d84be5d3c5-kube-api-access-lfdrg\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.554334 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
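The csi_attacher.go:380 entry just above shows why the hostpath-provisioner volume needs no staging pass: the kubelet only calls NodeStageVolume (MountDevice) when the CSI driver advertises the STAGE_UNSTAGE_VOLUME node capability, and kubevirt.io.hostpath-provisioner does not, so the attacher records MountDevice as trivially succeeded and goes straight to MountVolume.SetUp. A minimal sketch of the driver-side RPC behind that decision, assuming the stock CSI spec Go bindings; the nodeServer type and package name are hypothetical:

package hostpathcsi

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// nodeServer is a hypothetical stand-in for a driver's CSI node service.
type nodeServer struct{}

// NodeGetCapabilities is the RPC the kubelet's CSI attacher consults before
// MountDevice. Returning no STAGE_UNSTAGE_VOLUME entry (as the hostpath
// provisioner in the log above effectively does) produces the
// "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..." path.
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			// A driver that implements NodeStageVolume/NodeUnstageVolume
			// would opt in to the staging step by including:
			// {
			// 	Type: &csi.NodeServiceCapability_Rpc{
			// 		Rpc: &csi.NodeServiceCapability_RPC{
			// 			Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
			// 		},
			// 	},
			// },
		},
	}, nil
}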
Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.554378 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f97f70c7db0d107365ba9380682054117b26a76438e41c11c0e8ee97386d3495/globalmount\"" pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.579146 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfdrg\" (UniqueName: \"kubernetes.io/projected/7ae1f53c-b5fb-43db-b752-97d84be5d3c5-kube-api-access-lfdrg\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.580680 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b119cb56-47f1-4058-926f-faad0d3bfb1a\") pod \"minio\" (UID: \"7ae1f53c-b5fb-43db-b752-97d84be5d3c5\") " pod="minio-dev/minio" Nov 26 22:33:03 crc kubenswrapper[4903]: I1126 22:33:03.645892 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 26 22:33:04 crc kubenswrapper[4903]: I1126 22:33:04.102329 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 26 22:33:04 crc kubenswrapper[4903]: I1126 22:33:04.835668 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"7ae1f53c-b5fb-43db-b752-97d84be5d3c5","Type":"ContainerStarted","Data":"364907ca001900ef2cb0fbbc3bc8cc1f85aab6dbb4f103e9f38e75df03ec374c"} Nov 26 22:33:07 crc kubenswrapper[4903]: I1126 22:33:07.863485 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"7ae1f53c-b5fb-43db-b752-97d84be5d3c5","Type":"ContainerStarted","Data":"14b406ea63c8b9d2490455374f7fdd964dfb0ff1b02ff9dade1ab68efb345a89"} Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.592610 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=8.314788404 podStartE2EDuration="11.592574019s" podCreationTimestamp="2025-11-26 22:33:00 +0000 UTC" firstStartedPulling="2025-11-26 22:33:04.120024443 +0000 UTC m=+712.810259383" lastFinishedPulling="2025-11-26 22:33:07.397810098 +0000 UTC m=+716.088044998" observedRunningTime="2025-11-26 22:33:07.883618728 +0000 UTC m=+716.573853638" watchObservedRunningTime="2025-11-26 22:33:11.592574019 +0000 UTC m=+720.282808969" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.596182 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-htm5b"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.598210 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.601875 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.601909 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.602015 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.602510 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.602779 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-nrccd" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.623102 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-htm5b"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.703629 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-config\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.703747 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.703809 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wswln\" (UniqueName: \"kubernetes.io/projected/546d4145-a63b-4664-86d0-9ce432670a7b-kube-api-access-wswln\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.703945 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.704010 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.752647 4903 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-k45nx"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.753674 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.756369 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.756616 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.756797 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.774337 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-k45nx"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.804880 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.805135 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-config\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.805237 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.805347 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wswln\" (UniqueName: \"kubernetes.io/projected/546d4145-a63b-4664-86d0-9ce432670a7b-kube-api-access-wswln\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.805795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.806401 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-config\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " 
pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.806403 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.812372 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.819542 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/546d4145-a63b-4664-86d0-9ce432670a7b-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.829931 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.831234 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.834536 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.834746 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.835131 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wswln\" (UniqueName: \"kubernetes.io/projected/546d4145-a63b-4664-86d0-9ce432670a7b-kube-api-access-wswln\") pod \"logging-loki-distributor-76cc67bf56-htm5b\" (UID: \"546d4145-a63b-4664-86d0-9ce432670a7b\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.841275 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907751 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-config\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907805 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: 
\"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907878 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907899 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjkrk\" (UniqueName: \"kubernetes.io/projected/138798d6-77b9-4e20-970b-d83e0378e667-kube-api-access-sjkrk\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907925 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.907953 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.939328 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.956059 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.957131 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.960100 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.960342 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.962579 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-tvvts" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.962909 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.963207 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.963269 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.964731 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.965044 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.967514 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs"] Nov 26 22:33:11 crc kubenswrapper[4903]: I1126 22:33:11.980176 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010062 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010148 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjkrk\" (UniqueName: \"kubernetes.io/projected/138798d6-77b9-4e20-970b-d83e0378e667-kube-api-access-sjkrk\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010172 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010210 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-http\") pod 
\"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010231 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010259 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010300 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010331 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-config\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010355 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf2xw\" (UniqueName: \"kubernetes.io/projected/9fb3c717-adf5-483c-9d16-6d47d489a5e1-kube-api-access-zf2xw\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010377 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.010396 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-config\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.011883 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-ca-bundle\") pod 
\"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.012926 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/138798d6-77b9-4e20-970b-d83e0378e667-config\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.012966 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.012997 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.013045 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.024379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.032581 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.037665 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjkrk\" (UniqueName: \"kubernetes.io/projected/138798d6-77b9-4e20-970b-d83e0378e667-kube-api-access-sjkrk\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.042209 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/138798d6-77b9-4e20-970b-d83e0378e667-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-k45nx\" (UID: \"138798d6-77b9-4e20-970b-d83e0378e667\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.070003 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.111618 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112028 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hg2h\" (UniqueName: \"kubernetes.io/projected/64e0c0a9-13e7-4f0b-989d-8f217958cd92-kube-api-access-5hg2h\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112065 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112118 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112146 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112198 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112218 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112299 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: 
\"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.112648 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.113715 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.113809 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.113831 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.113850 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.114442 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.113988 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf2xw\" (UniqueName: \"kubernetes.io/projected/9fb3c717-adf5-483c-9d16-6d47d489a5e1-kube-api-access-zf2xw\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115110 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115251 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-config\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115271 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115406 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mknjr\" (UniqueName: \"kubernetes.io/projected/16a3e6c0-118c-4827-b39b-d9a59d959fec-kube-api-access-mknjr\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115426 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115455 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115664 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.115814 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.117358 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fb3c717-adf5-483c-9d16-6d47d489a5e1-config\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.118019 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.132238 
4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.135217 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf2xw\" (UniqueName: \"kubernetes.io/projected/9fb3c717-adf5-483c-9d16-6d47d489a5e1-kube-api-access-zf2xw\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.135416 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/9fb3c717-adf5-483c-9d16-6d47d489a5e1-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-52tt7\" (UID: \"9fb3c717-adf5-483c-9d16-6d47d489a5e1\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.181399 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224476 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224523 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224545 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mknjr\" (UniqueName: \"kubernetes.io/projected/16a3e6c0-118c-4827-b39b-d9a59d959fec-kube-api-access-mknjr\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224564 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224603 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " 
pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224625 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224641 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hg2h\" (UniqueName: \"kubernetes.io/projected/64e0c0a9-13e7-4f0b-989d-8f217958cd92-kube-api-access-5hg2h\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224666 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224718 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224744 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224764 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224790 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224833 4903 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224851 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.224869 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.225580 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.225614 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.226271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.226608 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.226668 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.227199 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/64e0c0a9-13e7-4f0b-989d-8f217958cd92-rbac\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " 
pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.227255 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.227462 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/16a3e6c0-118c-4827-b39b-d9a59d959fec-lokistack-gateway\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.232334 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.233076 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tenants\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.236488 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.239667 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.240105 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/64e0c0a9-13e7-4f0b-989d-8f217958cd92-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.240180 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/16a3e6c0-118c-4827-b39b-d9a59d959fec-tls-secret\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.244442 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hg2h\" (UniqueName: 
\"kubernetes.io/projected/64e0c0a9-13e7-4f0b-989d-8f217958cd92-kube-api-access-5hg2h\") pod \"logging-loki-gateway-6b8dc7bf86-tvgns\" (UID: \"64e0c0a9-13e7-4f0b-989d-8f217958cd92\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.245208 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mknjr\" (UniqueName: \"kubernetes.io/projected/16a3e6c0-118c-4827-b39b-d9a59d959fec-kube-api-access-mknjr\") pod \"logging-loki-gateway-6b8dc7bf86-g4fbs\" (UID: \"16a3e6c0-118c-4827-b39b-d9a59d959fec\") " pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.314061 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.318800 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.441350 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-htm5b"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.506612 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.562447 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-k45nx"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.755683 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.756854 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.758621 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.759806 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.764253 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.804997 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.805818 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.807637 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.807909 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.818191 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.875355 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.898231 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.899749 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.901847 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.903142 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.906385 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" event={"ID":"16a3e6c0-118c-4827-b39b-d9a59d959fec","Type":"ContainerStarted","Data":"0f82e8f4fcc1161270c42d7870c679f3321cd155631df33100548cf97e5c87a2"} Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.907252 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" event={"ID":"138798d6-77b9-4e20-970b-d83e0378e667","Type":"ContainerStarted","Data":"c61499aa112ca0474f4ab2fddabc40a69f248317bae49e53d61f4f36b40ee1c3"} Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.908004 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" event={"ID":"546d4145-a63b-4664-86d0-9ce432670a7b","Type":"ContainerStarted","Data":"f9efd2dbc7f3efb05c30d1fa94ead569b7053cbe7640b208e1b4b50bd9b975c4"} Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.908879 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.910271 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" event={"ID":"9fb3c717-adf5-483c-9d16-6d47d489a5e1","Type":"ContainerStarted","Data":"96d8a4f2cef453f568d9af49507cb9a85b7d6995b2f1a20c03a3fb8c0bb2c23f"} Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.923236 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns"] Nov 26 22:33:12 crc kubenswrapper[4903]: W1126 22:33:12.925217 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64e0c0a9_13e7_4f0b_989d_8f217958cd92.slice/crio-6e1f05b31a3f86fac46a09b9bd7a3536b46405071c4282fed9d2717469252dd5 WatchSource:0}: Error finding container 
6e1f05b31a3f86fac46a09b9bd7a3536b46405071c4282fed9d2717469252dd5: Status 404 returned error can't find the container with id 6e1f05b31a3f86fac46a09b9bd7a3536b46405071c4282fed9d2717469252dd5 Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.936802 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.936859 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.936884 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq8tr\" (UniqueName: \"kubernetes.io/projected/23d0313e-2bdb-4054-8951-2e29fd19f371-kube-api-access-gq8tr\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.936908 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8szpl\" (UniqueName: \"kubernetes.io/projected/0cb383d8-296b-4298-8f2f-28edb1f1278f-kube-api-access-8szpl\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.936928 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-043cd109-f667-449a-a435-6cf3c5c51be4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-043cd109-f667-449a-a435-6cf3c5c51be4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937123 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-config\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937160 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937201 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " 
pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937234 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937300 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937362 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937381 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-config\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937656 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937744 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:12 crc kubenswrapper[4903]: I1126 22:33:12.937787 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039319 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039371 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq8tr\" 
(UniqueName: \"kubernetes.io/projected/23d0313e-2bdb-4054-8951-2e29fd19f371-kube-api-access-gq8tr\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039394 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8szpl\" (UniqueName: \"kubernetes.io/projected/0cb383d8-296b-4298-8f2f-28edb1f1278f-kube-api-access-8szpl\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039415 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-043cd109-f667-449a-a435-6cf3c5c51be4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-043cd109-f667-449a-a435-6cf3c5c51be4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039439 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-config\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039461 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-config\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039483 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039500 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039532 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-config\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039558 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc 
kubenswrapper[4903]: I1126 22:33:13.039588 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039614 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039650 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039670 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039702 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gq8p\" (UniqueName: \"kubernetes.io/projected/d4ca2376-fa84-4a6c-b47b-3661bacfd578-kube-api-access-7gq8p\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039723 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039746 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039774 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: 
\"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039796 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.039833 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.041915 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.043375 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.043834 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0cb383d8-296b-4298-8f2f-28edb1f1278f-config\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045335 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045387 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8060f83de860002f426ecf37a6107f0332dff8d5fe1f67affc83f4b8c06f18d9/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045462 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045491 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045507 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-043cd109-f667-449a-a435-6cf3c5c51be4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-043cd109-f667-449a-a435-6cf3c5c51be4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bf3ddf749ca7e04eea9428df10d5eac478d87f7f8e6be95571530d2a8e23a9fc/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045542 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/131a49956067b7b51e8456ba9dfa428f8211dc619561dc4adf0817ff1e472c0c/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.045851 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23d0313e-2bdb-4054-8951-2e29fd19f371-config\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.051121 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.054159 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.054306 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: 
\"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.054416 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.056654 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/23d0313e-2bdb-4054-8951-2e29fd19f371-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.056729 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/0cb383d8-296b-4298-8f2f-28edb1f1278f-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.060103 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq8tr\" (UniqueName: \"kubernetes.io/projected/23d0313e-2bdb-4054-8951-2e29fd19f371-kube-api-access-gq8tr\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.067548 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8szpl\" (UniqueName: \"kubernetes.io/projected/0cb383d8-296b-4298-8f2f-28edb1f1278f-kube-api-access-8szpl\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.075773 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-825406ee-7d96-46a0-a02f-5fd38d8529c4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.081531 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-043cd109-f667-449a-a435-6cf3c5c51be4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-043cd109-f667-449a-a435-6cf3c5c51be4\") pod \"logging-loki-ingester-0\" (UID: \"23d0313e-2bdb-4054-8951-2e29fd19f371\") " pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.083066 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3f91153b-687b-4e21-bbe2-b65187bf45e1\") pod \"logging-loki-compactor-0\" (UID: \"0cb383d8-296b-4298-8f2f-28edb1f1278f\") " pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141659 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141789 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141857 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141885 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141918 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gq8p\" (UniqueName: \"kubernetes.io/projected/d4ca2376-fa84-4a6c-b47b-3661bacfd578-kube-api-access-7gq8p\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.141986 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.142073 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-config\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.143161 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-config\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.143564 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " 
pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.146390 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.146436 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/cc0719185377567fa885c7508e24aee39f9ad77ab340aadfdb04123bf0eff62b/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.146458 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.147551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.147772 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d4ca2376-fa84-4a6c-b47b-3661bacfd578-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.156210 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.161199 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gq8p\" (UniqueName: \"kubernetes.io/projected/d4ca2376-fa84-4a6c-b47b-3661bacfd578-kube-api-access-7gq8p\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.182349 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-95a5af9d-910b-442f-ad50-78a8a50757bb\") pod \"logging-loki-index-gateway-0\" (UID: \"d4ca2376-fa84-4a6c-b47b-3661bacfd578\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.261166 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.372530 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.431784 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 26 22:33:13 crc kubenswrapper[4903]: W1126 22:33:13.440324 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0cb383d8_296b_4298_8f2f_28edb1f1278f.slice/crio-0896e7e44c99bd853ea5e13767b44211bfc10b788c0630cb6fa5dca844f12615 WatchSource:0}: Error finding container 0896e7e44c99bd853ea5e13767b44211bfc10b788c0630cb6fa5dca844f12615: Status 404 returned error can't find the container with id 0896e7e44c99bd853ea5e13767b44211bfc10b788c0630cb6fa5dca844f12615 Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.739717 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 26 22:33:13 crc kubenswrapper[4903]: W1126 22:33:13.750174 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4ca2376_fa84_4a6c_b47b_3661bacfd578.slice/crio-efecc854cd86af7213271dcd55f4a35715d6e2ab5498b8908060d7f1a2e995e0 WatchSource:0}: Error finding container efecc854cd86af7213271dcd55f4a35715d6e2ab5498b8908060d7f1a2e995e0: Status 404 returned error can't find the container with id efecc854cd86af7213271dcd55f4a35715d6e2ab5498b8908060d7f1a2e995e0 Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.887155 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 26 22:33:13 crc kubenswrapper[4903]: W1126 22:33:13.918926 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23d0313e_2bdb_4054_8951_2e29fd19f371.slice/crio-4a038abbb2e23fcf5787261e1b3e5caa010f419b2a35464b5b0ded955c6a3da8 WatchSource:0}: Error finding container 4a038abbb2e23fcf5787261e1b3e5caa010f419b2a35464b5b0ded955c6a3da8: Status 404 returned error can't find the container with id 4a038abbb2e23fcf5787261e1b3e5caa010f419b2a35464b5b0ded955c6a3da8 Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.928724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"0cb383d8-296b-4298-8f2f-28edb1f1278f","Type":"ContainerStarted","Data":"0896e7e44c99bd853ea5e13767b44211bfc10b788c0630cb6fa5dca844f12615"} Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.930288 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"d4ca2376-fa84-4a6c-b47b-3661bacfd578","Type":"ContainerStarted","Data":"efecc854cd86af7213271dcd55f4a35715d6e2ab5498b8908060d7f1a2e995e0"} Nov 26 22:33:13 crc kubenswrapper[4903]: I1126 22:33:13.931834 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" event={"ID":"64e0c0a9-13e7-4f0b-989d-8f217958cd92","Type":"ContainerStarted","Data":"6e1f05b31a3f86fac46a09b9bd7a3536b46405071c4282fed9d2717469252dd5"} Nov 26 22:33:14 crc kubenswrapper[4903]: I1126 22:33:14.937630 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"23d0313e-2bdb-4054-8951-2e29fd19f371","Type":"ContainerStarted","Data":"4a038abbb2e23fcf5787261e1b3e5caa010f419b2a35464b5b0ded955c6a3da8"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 
22:33:16.953029 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" event={"ID":"9fb3c717-adf5-483c-9d16-6d47d489a5e1","Type":"ContainerStarted","Data":"63df59668e750d8643f6fdc4cbb48678393873fdc55ce3c14f566d40ef247a44"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.953408 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.955403 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" event={"ID":"16a3e6c0-118c-4827-b39b-d9a59d959fec","Type":"ContainerStarted","Data":"8b3aa9a69ec06286e65181111a960c683814c7aba0fed1ba7e8b432ac85c1b7d"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.957037 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"d4ca2376-fa84-4a6c-b47b-3661bacfd578","Type":"ContainerStarted","Data":"70102036c9f6c35d4d78e16c7ba86d9df611b7860723c9a0be2ff95f52a12f3d"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.957143 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.958429 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"23d0313e-2bdb-4054-8951-2e29fd19f371","Type":"ContainerStarted","Data":"299eef040bca8a57f420fcccdc03bb10408bdedeb27c91e0d36e241b468894e8"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.958964 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.960341 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" event={"ID":"64e0c0a9-13e7-4f0b-989d-8f217958cd92","Type":"ContainerStarted","Data":"616c51332da255f79d726e56d0db96005b21a02778c75450b55b087a50ac797d"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.961792 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" event={"ID":"138798d6-77b9-4e20-970b-d83e0378e667","Type":"ContainerStarted","Data":"9eeae0bf64dff37542980a35df0559525aeb5699a5b828198fd88b20ba27f1ce"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.961861 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.963707 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" event={"ID":"546d4145-a63b-4664-86d0-9ce432670a7b","Type":"ContainerStarted","Data":"03ea555d6a5cf61e2a3c2e5367f81d28ba9fa6e1ef341426c9f175ab6d49f447"} Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.963803 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.965286 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"0cb383d8-296b-4298-8f2f-28edb1f1278f","Type":"ContainerStarted","Data":"d4037a46db30aca8b0dec0b478746d27cae410aafb0930661590c4114b06ac74"} Nov 26 
22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.965434 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.974292 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" podStartSLOduration=1.996023949 podStartE2EDuration="5.974275853s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:12.578422044 +0000 UTC m=+721.268656944" lastFinishedPulling="2025-11-26 22:33:16.556673948 +0000 UTC m=+725.246908848" observedRunningTime="2025-11-26 22:33:16.969089853 +0000 UTC m=+725.659324763" watchObservedRunningTime="2025-11-26 22:33:16.974275853 +0000 UTC m=+725.664510763" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.986625 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" podStartSLOduration=2.11545351 podStartE2EDuration="5.986613927s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:12.586447071 +0000 UTC m=+721.276681981" lastFinishedPulling="2025-11-26 22:33:16.457607448 +0000 UTC m=+725.147842398" observedRunningTime="2025-11-26 22:33:16.984012966 +0000 UTC m=+725.674247866" watchObservedRunningTime="2025-11-26 22:33:16.986613927 +0000 UTC m=+725.676848837" Nov 26 22:33:16 crc kubenswrapper[4903]: I1126 22:33:16.999149 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.184056183 podStartE2EDuration="5.999137116s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:13.754626138 +0000 UTC m=+722.444861058" lastFinishedPulling="2025-11-26 22:33:16.569707081 +0000 UTC m=+725.259941991" observedRunningTime="2025-11-26 22:33:16.99892753 +0000 UTC m=+725.689162440" watchObservedRunningTime="2025-11-26 22:33:16.999137116 +0000 UTC m=+725.689372026" Nov 26 22:33:17 crc kubenswrapper[4903]: I1126 22:33:17.018746 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.372395737 podStartE2EDuration="6.018732086s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:13.92256044 +0000 UTC m=+722.612795360" lastFinishedPulling="2025-11-26 22:33:16.568896799 +0000 UTC m=+725.259131709" observedRunningTime="2025-11-26 22:33:17.016142446 +0000 UTC m=+725.706377356" watchObservedRunningTime="2025-11-26 22:33:17.018732086 +0000 UTC m=+725.708966996" Nov 26 22:33:17 crc kubenswrapper[4903]: I1126 22:33:17.035580 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" podStartSLOduration=2.065814767 podStartE2EDuration="6.035563321s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:12.565548456 +0000 UTC m=+721.255783366" lastFinishedPulling="2025-11-26 22:33:16.535297 +0000 UTC m=+725.225531920" observedRunningTime="2025-11-26 22:33:17.03404073 +0000 UTC m=+725.724275660" watchObservedRunningTime="2025-11-26 22:33:17.035563321 +0000 UTC m=+725.725798231" Nov 26 22:33:17 crc kubenswrapper[4903]: I1126 22:33:17.047432 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" 
podStartSLOduration=2.933467696 podStartE2EDuration="6.047403452s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:13.442864905 +0000 UTC m=+722.133099825" lastFinishedPulling="2025-11-26 22:33:16.556800661 +0000 UTC m=+725.247035581" observedRunningTime="2025-11-26 22:33:17.047079512 +0000 UTC m=+725.737314422" watchObservedRunningTime="2025-11-26 22:33:17.047403452 +0000 UTC m=+725.737638362" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.006798 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" event={"ID":"64e0c0a9-13e7-4f0b-989d-8f217958cd92","Type":"ContainerStarted","Data":"08a1627885e9d78056a69ddcbb758bf99d987a6643b2e60f42d85cfd06728b5c"} Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.007875 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.007944 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.010468 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" event={"ID":"16a3e6c0-118c-4827-b39b-d9a59d959fec","Type":"ContainerStarted","Data":"262091332b8ee3830f341f733397ac0272cc8ba928c43932f9d242d4ae424a43"} Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.010859 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.023637 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.025183 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.043842 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.045012 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-tvgns" podStartSLOduration=2.697448402 podStartE2EDuration="9.044990851s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:12.927235289 +0000 UTC m=+721.617470209" lastFinishedPulling="2025-11-26 22:33:19.274777748 +0000 UTC m=+727.965012658" observedRunningTime="2025-11-26 22:33:20.038946787 +0000 UTC m=+728.729181737" watchObservedRunningTime="2025-11-26 22:33:20.044990851 +0000 UTC m=+728.735225791" Nov 26 22:33:20 crc kubenswrapper[4903]: I1126 22:33:20.113627 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" podStartSLOduration=2.726893247 podStartE2EDuration="9.113603947s" podCreationTimestamp="2025-11-26 22:33:11 +0000 UTC" firstStartedPulling="2025-11-26 22:33:12.896546468 +0000 UTC m=+721.586781388" lastFinishedPulling="2025-11-26 22:33:19.283257178 +0000 UTC m=+727.973492088" observedRunningTime="2025-11-26 22:33:20.103605336 +0000 UTC m=+728.793840326" watchObservedRunningTime="2025-11-26 22:33:20.113603947 +0000 UTC 
m=+728.803838897" Nov 26 22:33:21 crc kubenswrapper[4903]: I1126 22:33:21.018176 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:21 crc kubenswrapper[4903]: I1126 22:33:21.067156 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-6b8dc7bf86-g4fbs" Nov 26 22:33:31 crc kubenswrapper[4903]: I1126 22:33:31.949909 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-htm5b" Nov 26 22:33:32 crc kubenswrapper[4903]: I1126 22:33:32.078799 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-k45nx" Nov 26 22:33:32 crc kubenswrapper[4903]: I1126 22:33:32.192647 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-52tt7" Nov 26 22:33:33 crc kubenswrapper[4903]: I1126 22:33:33.166993 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Nov 26 22:33:33 crc kubenswrapper[4903]: I1126 22:33:33.269630 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Nov 26 22:33:33 crc kubenswrapper[4903]: I1126 22:33:33.381650 4903 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 26 22:33:33 crc kubenswrapper[4903]: I1126 22:33:33.381761 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="23d0313e-2bdb-4054-8951-2e29fd19f371" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:33:39 crc kubenswrapper[4903]: I1126 22:33:39.538023 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:33:39 crc kubenswrapper[4903]: I1126 22:33:39.538282 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" containerID="cri-o://9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8" gracePeriod=30 Nov 26 22:33:39 crc kubenswrapper[4903]: I1126 22:33:39.619482 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:33:39 crc kubenswrapper[4903]: I1126 22:33:39.619928 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerName="route-controller-manager" containerID="cri-o://08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285" gracePeriod=30 Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.035391 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.040307 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116056 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config\") pod \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116179 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzkhj\" (UniqueName: \"kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj\") pod \"979e7792-1bc6-482b-a63b-fd6d1227970a\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116221 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr7rk\" (UniqueName: \"kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk\") pod \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca\") pod \"979e7792-1bc6-482b-a63b-fd6d1227970a\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116282 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert\") pod \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116868 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca" (OuterVolumeSpecName: "client-ca") pod "979e7792-1bc6-482b-a63b-fd6d1227970a" (UID: "979e7792-1bc6-482b-a63b-fd6d1227970a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116907 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config" (OuterVolumeSpecName: "config") pod "15015a4f-e3d3-4042-bf77-70c01c7c05b6" (UID: "15015a4f-e3d3-4042-bf77-70c01c7c05b6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.116961 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert\") pod \"979e7792-1bc6-482b-a63b-fd6d1227970a\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.117024 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles\") pod \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.117052 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca\") pod \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\" (UID: \"15015a4f-e3d3-4042-bf77-70c01c7c05b6\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.117878 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "15015a4f-e3d3-4042-bf77-70c01c7c05b6" (UID: "15015a4f-e3d3-4042-bf77-70c01c7c05b6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.119618 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca" (OuterVolumeSpecName: "client-ca") pod "15015a4f-e3d3-4042-bf77-70c01c7c05b6" (UID: "15015a4f-e3d3-4042-bf77-70c01c7c05b6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.120353 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config\") pod \"979e7792-1bc6-482b-a63b-fd6d1227970a\" (UID: \"979e7792-1bc6-482b-a63b-fd6d1227970a\") " Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.121652 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config" (OuterVolumeSpecName: "config") pod "979e7792-1bc6-482b-a63b-fd6d1227970a" (UID: "979e7792-1bc6-482b-a63b-fd6d1227970a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.122055 4903 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.122082 4903 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.122096 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15015a4f-e3d3-4042-bf77-70c01c7c05b6-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.122108 4903 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.123562 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "979e7792-1bc6-482b-a63b-fd6d1227970a" (UID: "979e7792-1bc6-482b-a63b-fd6d1227970a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.124177 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj" (OuterVolumeSpecName: "kube-api-access-jzkhj") pod "979e7792-1bc6-482b-a63b-fd6d1227970a" (UID: "979e7792-1bc6-482b-a63b-fd6d1227970a"). InnerVolumeSpecName "kube-api-access-jzkhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.133164 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "15015a4f-e3d3-4042-bf77-70c01c7c05b6" (UID: "15015a4f-e3d3-4042-bf77-70c01c7c05b6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.134047 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk" (OuterVolumeSpecName: "kube-api-access-kr7rk") pod "15015a4f-e3d3-4042-bf77-70c01c7c05b6" (UID: "15015a4f-e3d3-4042-bf77-70c01c7c05b6"). InnerVolumeSpecName "kube-api-access-kr7rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.190312 4903 generic.go:334] "Generic (PLEG): container finished" podID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerID="08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285" exitCode=0 Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.190371 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.190386 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" event={"ID":"979e7792-1bc6-482b-a63b-fd6d1227970a","Type":"ContainerDied","Data":"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285"} Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.190412 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp" event={"ID":"979e7792-1bc6-482b-a63b-fd6d1227970a","Type":"ContainerDied","Data":"80005c51987ed8190d0c8392294ea267d59ebacea635fc7a8afa489a4d943f05"} Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.190428 4903 scope.go:117] "RemoveContainer" containerID="08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.194226 4903 generic.go:334] "Generic (PLEG): container finished" podID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerID="9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8" exitCode=0 Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.194582 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" event={"ID":"15015a4f-e3d3-4042-bf77-70c01c7c05b6","Type":"ContainerDied","Data":"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8"} Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.194669 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" event={"ID":"15015a4f-e3d3-4042-bf77-70c01c7c05b6","Type":"ContainerDied","Data":"005cc6ce29e0ec3a85b99fc9ff657145754a64646bcc609b4cd8e0e24867c194"} Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.194629 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-d8tw8" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223395 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzkhj\" (UniqueName: \"kubernetes.io/projected/979e7792-1bc6-482b-a63b-fd6d1227970a-kube-api-access-jzkhj\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223427 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr7rk\" (UniqueName: \"kubernetes.io/projected/15015a4f-e3d3-4042-bf77-70c01c7c05b6-kube-api-access-kr7rk\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223438 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15015a4f-e3d3-4042-bf77-70c01c7c05b6-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223448 4903 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/979e7792-1bc6-482b-a63b-fd6d1227970a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223459 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/979e7792-1bc6-482b-a63b-fd6d1227970a-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.223498 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.228843 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-7vvhp"] Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.235428 4903 scope.go:117] "RemoveContainer" containerID="08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285" Nov 26 22:33:40 crc kubenswrapper[4903]: E1126 22:33:40.235914 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285\": container with ID starting with 08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285 not found: ID does not exist" containerID="08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.235940 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285"} err="failed to get container status \"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285\": rpc error: code = NotFound desc = could not find container \"08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285\": container with ID starting with 08a57e9c069e05bb2210b55cc749a4820f7b146e13f88cc57a8bc71f615d7285 not found: ID does not exist" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.235960 4903 scope.go:117] "RemoveContainer" containerID="9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.238748 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.242734 4903 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-d8tw8"] Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.258171 4903 scope.go:117] "RemoveContainer" containerID="9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8" Nov 26 22:33:40 crc kubenswrapper[4903]: E1126 22:33:40.266379 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8\": container with ID starting with 9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8 not found: ID does not exist" containerID="9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8" Nov 26 22:33:40 crc kubenswrapper[4903]: I1126 22:33:40.268316 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8"} err="failed to get container status \"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8\": rpc error: code = NotFound desc = could not find container \"9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8\": container with ID starting with 9c6e1caeb91f7e831b468dd07690f583fe6781585cc9c91c486994187a2152e8 not found: ID does not exist" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.309407 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7b6fb5b89-wsl68"] Nov 26 22:33:41 crc kubenswrapper[4903]: E1126 22:33:41.310002 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerName="route-controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.310027 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerName="route-controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: E1126 22:33:41.310051 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.310064 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.310332 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" containerName="route-controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.310353 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" containerName="controller-manager" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.311213 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.315334 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.315463 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.315646 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.316452 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.316886 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt"] Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.317097 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.317905 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.318578 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.324385 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.325108 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.326281 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.326408 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.326594 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.327173 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.331262 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt"] Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.345122 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.350268 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7b6fb5b89-wsl68"] Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.445321 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-client-ca\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.445376 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-config\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.445409 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-client-ca\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.445459 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-config\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.445909 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvgdh\" (UniqueName: \"kubernetes.io/projected/1c63db9f-86a9-42c5-8456-c43f108c1973-kube-api-access-lvgdh\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.446005 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d04bde65-37e2-4612-ad39-04d0846b128a-serving-cert\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.446074 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c63db9f-86a9-42c5-8456-c43f108c1973-serving-cert\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.446194 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-proxy-ca-bundles\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.446242 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwb45\" (UniqueName: 
\"kubernetes.io/projected/d04bde65-37e2-4612-ad39-04d0846b128a-kube-api-access-xwb45\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.548268 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvgdh\" (UniqueName: \"kubernetes.io/projected/1c63db9f-86a9-42c5-8456-c43f108c1973-kube-api-access-lvgdh\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.548671 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d04bde65-37e2-4612-ad39-04d0846b128a-serving-cert\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.548957 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c63db9f-86a9-42c5-8456-c43f108c1973-serving-cert\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.549183 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-proxy-ca-bundles\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.549379 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwb45\" (UniqueName: \"kubernetes.io/projected/d04bde65-37e2-4612-ad39-04d0846b128a-kube-api-access-xwb45\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.549617 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-client-ca\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.549846 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-config\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.550075 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-client-ca\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: 
\"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.550270 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-config\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.551242 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-client-ca\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.551488 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-proxy-ca-bundles\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.551598 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-client-ca\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.551901 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c63db9f-86a9-42c5-8456-c43f108c1973-config\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.552859 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04bde65-37e2-4612-ad39-04d0846b128a-config\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.556971 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1c63db9f-86a9-42c5-8456-c43f108c1973-serving-cert\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.557836 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d04bde65-37e2-4612-ad39-04d0846b128a-serving-cert\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.576275 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lvgdh\" (UniqueName: \"kubernetes.io/projected/1c63db9f-86a9-42c5-8456-c43f108c1973-kube-api-access-lvgdh\") pod \"route-controller-manager-5b78f5fc4-dbptt\" (UID: \"1c63db9f-86a9-42c5-8456-c43f108c1973\") " pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.583225 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwb45\" (UniqueName: \"kubernetes.io/projected/d04bde65-37e2-4612-ad39-04d0846b128a-kube-api-access-xwb45\") pod \"controller-manager-7b6fb5b89-wsl68\" (UID: \"d04bde65-37e2-4612-ad39-04d0846b128a\") " pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.649003 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.662120 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:41 crc kubenswrapper[4903]: I1126 22:33:41.997363 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7b6fb5b89-wsl68"] Nov 26 22:33:42 crc kubenswrapper[4903]: W1126 22:33:42.000395 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd04bde65_37e2_4612_ad39_04d0846b128a.slice/crio-d5ecc100817212fc8f6b34ced6012428a0be1932da2c6faf4d3574c379f44ebe WatchSource:0}: Error finding container d5ecc100817212fc8f6b34ced6012428a0be1932da2c6faf4d3574c379f44ebe: Status 404 returned error can't find the container with id d5ecc100817212fc8f6b34ced6012428a0be1932da2c6faf4d3574c379f44ebe Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.020298 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt"] Nov 26 22:33:42 crc kubenswrapper[4903]: W1126 22:33:42.024400 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c63db9f_86a9_42c5_8456_c43f108c1973.slice/crio-adaf7964c85d4bc95c3afdfcc86887453628f61aa42351d177dc24abbbf2d558 WatchSource:0}: Error finding container adaf7964c85d4bc95c3afdfcc86887453628f61aa42351d177dc24abbbf2d558: Status 404 returned error can't find the container with id adaf7964c85d4bc95c3afdfcc86887453628f61aa42351d177dc24abbbf2d558 Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.038926 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15015a4f-e3d3-4042-bf77-70c01c7c05b6" path="/var/lib/kubelet/pods/15015a4f-e3d3-4042-bf77-70c01c7c05b6/volumes" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.039649 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="979e7792-1bc6-482b-a63b-fd6d1227970a" path="/var/lib/kubelet/pods/979e7792-1bc6-482b-a63b-fd6d1227970a/volumes" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.216148 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" event={"ID":"1c63db9f-86a9-42c5-8456-c43f108c1973","Type":"ContainerStarted","Data":"ee7dd02433961c1bd2e85f977212ea58d78e9b4a5e727bdbcffe59b05fb849c6"} Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.216197 4903 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" event={"ID":"1c63db9f-86a9-42c5-8456-c43f108c1973","Type":"ContainerStarted","Data":"adaf7964c85d4bc95c3afdfcc86887453628f61aa42351d177dc24abbbf2d558"} Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.216457 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.218509 4903 patch_prober.go:28] interesting pod/route-controller-manager-5b78f5fc4-dbptt container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.80:8443/healthz\": dial tcp 10.217.0.80:8443: connect: connection refused" start-of-body= Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.218556 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" podUID="1c63db9f-86a9-42c5-8456-c43f108c1973" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.80:8443/healthz\": dial tcp 10.217.0.80:8443: connect: connection refused" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.219831 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" event={"ID":"d04bde65-37e2-4612-ad39-04d0846b128a","Type":"ContainerStarted","Data":"06fbe03f8877177c57b9b6dbfdfb76d6aa732ecb69d7ea2f771e45c3de7c72ad"} Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.219860 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" event={"ID":"d04bde65-37e2-4612-ad39-04d0846b128a","Type":"ContainerStarted","Data":"d5ecc100817212fc8f6b34ced6012428a0be1932da2c6faf4d3574c379f44ebe"} Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.220311 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.221459 4903 patch_prober.go:28] interesting pod/controller-manager-7b6fb5b89-wsl68 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.79:8443/healthz\": dial tcp 10.217.0.79:8443: connect: connection refused" start-of-body= Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.221523 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" podUID="d04bde65-37e2-4612-ad39-04d0846b128a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.79:8443/healthz\": dial tcp 10.217.0.79:8443: connect: connection refused" Nov 26 22:33:42 crc kubenswrapper[4903]: I1126 22:33:42.261520 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" podStartSLOduration=3.261501357 podStartE2EDuration="3.261501357s" podCreationTimestamp="2025-11-26 22:33:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:33:42.243293624 +0000 UTC m=+750.933528534" watchObservedRunningTime="2025-11-26 22:33:42.261501357 +0000 UTC m=+750.951736267" Nov 26 22:33:42 
crc kubenswrapper[4903]: I1126 22:33:42.263090 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" podStartSLOduration=3.263082479 podStartE2EDuration="3.263082479s" podCreationTimestamp="2025-11-26 22:33:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:33:42.26090746 +0000 UTC m=+750.951142370" watchObservedRunningTime="2025-11-26 22:33:42.263082479 +0000 UTC m=+750.953317389" Nov 26 22:33:43 crc kubenswrapper[4903]: I1126 22:33:43.236441 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7b6fb5b89-wsl68" Nov 26 22:33:43 crc kubenswrapper[4903]: I1126 22:33:43.237071 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5b78f5fc4-dbptt" Nov 26 22:33:43 crc kubenswrapper[4903]: I1126 22:33:43.377521 4903 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 26 22:33:43 crc kubenswrapper[4903]: I1126 22:33:43.377575 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="23d0313e-2bdb-4054-8951-2e29fd19f371" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:33:46 crc kubenswrapper[4903]: I1126 22:33:46.761905 4903 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 22:33:53 crc kubenswrapper[4903]: I1126 22:33:53.381336 4903 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 26 22:33:53 crc kubenswrapper[4903]: I1126 22:33:53.382015 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="23d0313e-2bdb-4054-8951-2e29fd19f371" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.202852 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.208224 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.219567 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.349841 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.349950 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9fcb\" (UniqueName: \"kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.349978 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.452207 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.452353 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9fcb\" (UniqueName: \"kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.452394 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.452796 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.453042 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.479467 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z9fcb\" (UniqueName: \"kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb\") pod \"redhat-operators-2t5dn\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.543914 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:33:58 crc kubenswrapper[4903]: I1126 22:33:58.979513 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:33:59 crc kubenswrapper[4903]: I1126 22:33:59.374643 4903 generic.go:334] "Generic (PLEG): container finished" podID="419ef374-1487-4796-ab40-0171619f08ef" containerID="ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9" exitCode=0 Nov 26 22:33:59 crc kubenswrapper[4903]: I1126 22:33:59.374732 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerDied","Data":"ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9"} Nov 26 22:33:59 crc kubenswrapper[4903]: I1126 22:33:59.374935 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerStarted","Data":"476c5433877003a9d7a2c4bec1e59dba6fa5df5ca61a3fa01a16c5d5198b625b"} Nov 26 22:34:00 crc kubenswrapper[4903]: I1126 22:34:00.388124 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerStarted","Data":"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0"} Nov 26 22:34:01 crc kubenswrapper[4903]: I1126 22:34:01.408896 4903 generic.go:334] "Generic (PLEG): container finished" podID="419ef374-1487-4796-ab40-0171619f08ef" containerID="ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0" exitCode=0 Nov 26 22:34:01 crc kubenswrapper[4903]: I1126 22:34:01.408966 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerDied","Data":"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0"} Nov 26 22:34:02 crc kubenswrapper[4903]: I1126 22:34:02.426405 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerStarted","Data":"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32"} Nov 26 22:34:02 crc kubenswrapper[4903]: I1126 22:34:02.453339 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2t5dn" podStartSLOduration=1.8261225159999999 podStartE2EDuration="4.453321257s" podCreationTimestamp="2025-11-26 22:33:58 +0000 UTC" firstStartedPulling="2025-11-26 22:33:59.376850934 +0000 UTC m=+768.067085854" lastFinishedPulling="2025-11-26 22:34:02.004049655 +0000 UTC m=+770.694284595" observedRunningTime="2025-11-26 22:34:02.448160417 +0000 UTC m=+771.138395337" watchObservedRunningTime="2025-11-26 22:34:02.453321257 +0000 UTC m=+771.143556177" Nov 26 22:34:03 crc kubenswrapper[4903]: I1126 22:34:03.379200 4903 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: 
Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 26 22:34:03 crc kubenswrapper[4903]: I1126 22:34:03.379297 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="23d0313e-2bdb-4054-8951-2e29fd19f371" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.112618 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.114926 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.152012 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.233380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.233468 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khbn8\" (UniqueName: \"kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.233546 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.335631 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.335859 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.335926 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khbn8\" (UniqueName: \"kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.336170 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.336464 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.368408 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khbn8\" (UniqueName: \"kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8\") pod \"community-operators-tmgg6\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.467527 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:07 crc kubenswrapper[4903]: I1126 22:34:07.962943 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:08 crc kubenswrapper[4903]: I1126 22:34:08.484187 4903 generic.go:334] "Generic (PLEG): container finished" podID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerID="828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6" exitCode=0 Nov 26 22:34:08 crc kubenswrapper[4903]: I1126 22:34:08.484277 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerDied","Data":"828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6"} Nov 26 22:34:08 crc kubenswrapper[4903]: I1126 22:34:08.485066 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerStarted","Data":"ab2489786e091d2fd37c9bd4b8724780b50502b1aed7d0e21a5b0488f1afd8ea"} Nov 26 22:34:08 crc kubenswrapper[4903]: I1126 22:34:08.544416 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:08 crc kubenswrapper[4903]: I1126 22:34:08.547767 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:09 crc kubenswrapper[4903]: I1126 22:34:09.495043 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerStarted","Data":"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c"} Nov 26 22:34:09 crc kubenswrapper[4903]: I1126 22:34:09.613540 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2t5dn" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="registry-server" probeResult="failure" output=< Nov 26 22:34:09 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 22:34:09 crc kubenswrapper[4903]: > Nov 26 22:34:10 crc kubenswrapper[4903]: I1126 22:34:10.506790 4903 generic.go:334] "Generic (PLEG): container finished" 
podID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerID="08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c" exitCode=0 Nov 26 22:34:10 crc kubenswrapper[4903]: I1126 22:34:10.506930 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerDied","Data":"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c"} Nov 26 22:34:11 crc kubenswrapper[4903]: I1126 22:34:11.521030 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerStarted","Data":"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852"} Nov 26 22:34:11 crc kubenswrapper[4903]: I1126 22:34:11.551538 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tmgg6" podStartSLOduration=2.072141874 podStartE2EDuration="4.551514206s" podCreationTimestamp="2025-11-26 22:34:07 +0000 UTC" firstStartedPulling="2025-11-26 22:34:08.486641717 +0000 UTC m=+777.176876637" lastFinishedPulling="2025-11-26 22:34:10.966014019 +0000 UTC m=+779.656248969" observedRunningTime="2025-11-26 22:34:11.547129237 +0000 UTC m=+780.237364217" watchObservedRunningTime="2025-11-26 22:34:11.551514206 +0000 UTC m=+780.241749146" Nov 26 22:34:13 crc kubenswrapper[4903]: I1126 22:34:13.382512 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.108683 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.112164 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.128777 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.262546 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvf8s\" (UniqueName: \"kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.262601 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.263081 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.364850 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.364957 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvf8s\" (UniqueName: \"kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.364981 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.365637 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.365639 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.392797 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bvf8s\" (UniqueName: \"kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s\") pod \"certified-operators-z9zc4\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.491487 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:14 crc kubenswrapper[4903]: I1126 22:34:14.950880 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:14 crc kubenswrapper[4903]: W1126 22:34:14.958567 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d000e98_2eb6_41e9_943b_294ed7b2243b.slice/crio-103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a WatchSource:0}: Error finding container 103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a: Status 404 returned error can't find the container with id 103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a Nov 26 22:34:15 crc kubenswrapper[4903]: I1126 22:34:15.557073 4903 generic.go:334] "Generic (PLEG): container finished" podID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerID="c03cc5766080c6ef0bff977fa69dea5cd42092fc7624986dc88e3339c339f025" exitCode=0 Nov 26 22:34:15 crc kubenswrapper[4903]: I1126 22:34:15.557111 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerDied","Data":"c03cc5766080c6ef0bff977fa69dea5cd42092fc7624986dc88e3339c339f025"} Nov 26 22:34:15 crc kubenswrapper[4903]: I1126 22:34:15.557134 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerStarted","Data":"103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a"} Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.468579 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.469296 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.550523 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.576825 4903 generic.go:334] "Generic (PLEG): container finished" podID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerID="782135ae538eea42fb48ff7ea916cd1133adcac31f66403acc864126cda113b3" exitCode=0 Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.578170 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerDied","Data":"782135ae538eea42fb48ff7ea916cd1133adcac31f66403acc864126cda113b3"} Nov 26 22:34:17 crc kubenswrapper[4903]: I1126 22:34:17.653333 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:18 crc kubenswrapper[4903]: I1126 22:34:18.601830 4903 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerStarted","Data":"4337628323fe2089317ee7e01e71f8a52d313ed38b96ce04e29ed2cdf09ba7b7"} Nov 26 22:34:18 crc kubenswrapper[4903]: I1126 22:34:18.617303 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:18 crc kubenswrapper[4903]: I1126 22:34:18.645187 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z9zc4" podStartSLOduration=1.951389955 podStartE2EDuration="4.645163295s" podCreationTimestamp="2025-11-26 22:34:14 +0000 UTC" firstStartedPulling="2025-11-26 22:34:15.559855624 +0000 UTC m=+784.250090574" lastFinishedPulling="2025-11-26 22:34:18.253628974 +0000 UTC m=+786.943863914" observedRunningTime="2025-11-26 22:34:18.634794864 +0000 UTC m=+787.325029854" watchObservedRunningTime="2025-11-26 22:34:18.645163295 +0000 UTC m=+787.335398215" Nov 26 22:34:18 crc kubenswrapper[4903]: I1126 22:34:18.698476 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:19 crc kubenswrapper[4903]: I1126 22:34:19.686995 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:19 crc kubenswrapper[4903]: I1126 22:34:19.687490 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tmgg6" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="registry-server" containerID="cri-o://0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852" gracePeriod=2 Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.257182 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.385789 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content\") pod \"f960af9a-0dac-4f20-90b6-5bf7ee326804\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.385850 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities\") pod \"f960af9a-0dac-4f20-90b6-5bf7ee326804\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.385954 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khbn8\" (UniqueName: \"kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8\") pod \"f960af9a-0dac-4f20-90b6-5bf7ee326804\" (UID: \"f960af9a-0dac-4f20-90b6-5bf7ee326804\") " Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.387250 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities" (OuterVolumeSpecName: "utilities") pod "f960af9a-0dac-4f20-90b6-5bf7ee326804" (UID: "f960af9a-0dac-4f20-90b6-5bf7ee326804"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.394406 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8" (OuterVolumeSpecName: "kube-api-access-khbn8") pod "f960af9a-0dac-4f20-90b6-5bf7ee326804" (UID: "f960af9a-0dac-4f20-90b6-5bf7ee326804"). InnerVolumeSpecName "kube-api-access-khbn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.488361 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.488410 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khbn8\" (UniqueName: \"kubernetes.io/projected/f960af9a-0dac-4f20-90b6-5bf7ee326804-kube-api-access-khbn8\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.620640 4903 generic.go:334] "Generic (PLEG): container finished" podID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerID="0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852" exitCode=0 Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.620748 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerDied","Data":"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852"} Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.620773 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tmgg6" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.620817 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmgg6" event={"ID":"f960af9a-0dac-4f20-90b6-5bf7ee326804","Type":"ContainerDied","Data":"ab2489786e091d2fd37c9bd4b8724780b50502b1aed7d0e21a5b0488f1afd8ea"} Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.620854 4903 scope.go:117] "RemoveContainer" containerID="0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.651340 4903 scope.go:117] "RemoveContainer" containerID="08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.679592 4903 scope.go:117] "RemoveContainer" containerID="828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.714601 4903 scope.go:117] "RemoveContainer" containerID="0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852" Nov 26 22:34:20 crc kubenswrapper[4903]: E1126 22:34:20.715224 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852\": container with ID starting with 0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852 not found: ID does not exist" containerID="0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.715452 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852"} err="failed to get container status \"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852\": rpc error: code = NotFound desc = could not find container \"0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852\": container with ID starting with 0594d88a1e7b7c556cfce80cf36b7c77ad4fa4ea198b714a206b1e23f0341852 not found: ID does not exist" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.715498 4903 scope.go:117] "RemoveContainer" containerID="08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c" Nov 26 22:34:20 crc kubenswrapper[4903]: E1126 22:34:20.716105 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c\": container with ID starting with 08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c not found: ID does not exist" containerID="08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.716146 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c"} err="failed to get container status \"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c\": rpc error: code = NotFound desc = could not find container \"08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c\": container with ID starting with 08227915a38fd09cc905e9416266a1b95c38af07bac0db33711fb6c9b294608c not found: ID does not exist" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.716174 4903 scope.go:117] "RemoveContainer" containerID="828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6" Nov 26 22:34:20 crc kubenswrapper[4903]: E1126 22:34:20.716618 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6\": container with ID starting with 828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6 not found: ID does not exist" containerID="828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.716668 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6"} err="failed to get container status \"828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6\": rpc error: code = NotFound desc = could not find container \"828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6\": container with ID starting with 828cb86785ba7beb77f81ee952e7f6f4e6c826c01cfa43967c34027f775431a6 not found: ID does not exist" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.756315 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f960af9a-0dac-4f20-90b6-5bf7ee326804" (UID: "f960af9a-0dac-4f20-90b6-5bf7ee326804"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.792832 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f960af9a-0dac-4f20-90b6-5bf7ee326804-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.971646 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:20 crc kubenswrapper[4903]: I1126 22:34:20.979618 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tmgg6"] Nov 26 22:34:21 crc kubenswrapper[4903]: E1126 22:34:21.116255 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf960af9a_0dac_4f20_90b6_5bf7ee326804.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf960af9a_0dac_4f20_90b6_5bf7ee326804.slice/crio-ab2489786e091d2fd37c9bd4b8724780b50502b1aed7d0e21a5b0488f1afd8ea\": RecentStats: unable to find data in memory cache]" Nov 26 22:34:21 crc kubenswrapper[4903]: I1126 22:34:21.885531 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:34:21 crc kubenswrapper[4903]: I1126 22:34:21.888015 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2t5dn" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="registry-server" containerID="cri-o://206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32" gracePeriod=2 Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.042429 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" path="/var/lib/kubelet/pods/f960af9a-0dac-4f20-90b6-5bf7ee326804/volumes" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.473755 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.630002 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9fcb\" (UniqueName: \"kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb\") pod \"419ef374-1487-4796-ab40-0171619f08ef\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.630157 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities\") pod \"419ef374-1487-4796-ab40-0171619f08ef\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.630272 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content\") pod \"419ef374-1487-4796-ab40-0171619f08ef\" (UID: \"419ef374-1487-4796-ab40-0171619f08ef\") " Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.632436 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities" (OuterVolumeSpecName: "utilities") pod "419ef374-1487-4796-ab40-0171619f08ef" (UID: "419ef374-1487-4796-ab40-0171619f08ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.643321 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb" (OuterVolumeSpecName: "kube-api-access-z9fcb") pod "419ef374-1487-4796-ab40-0171619f08ef" (UID: "419ef374-1487-4796-ab40-0171619f08ef"). InnerVolumeSpecName "kube-api-access-z9fcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.645782 4903 generic.go:334] "Generic (PLEG): container finished" podID="419ef374-1487-4796-ab40-0171619f08ef" containerID="206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32" exitCode=0 Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.645841 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerDied","Data":"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32"} Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.645879 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2t5dn" event={"ID":"419ef374-1487-4796-ab40-0171619f08ef","Type":"ContainerDied","Data":"476c5433877003a9d7a2c4bec1e59dba6fa5df5ca61a3fa01a16c5d5198b625b"} Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.645908 4903 scope.go:117] "RemoveContainer" containerID="206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.646101 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2t5dn" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.688432 4903 scope.go:117] "RemoveContainer" containerID="ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.714525 4903 scope.go:117] "RemoveContainer" containerID="ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.732971 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.733002 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9fcb\" (UniqueName: \"kubernetes.io/projected/419ef374-1487-4796-ab40-0171619f08ef-kube-api-access-z9fcb\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.744064 4903 scope.go:117] "RemoveContainer" containerID="206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32" Nov 26 22:34:22 crc kubenswrapper[4903]: E1126 22:34:22.744496 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32\": container with ID starting with 206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32 not found: ID does not exist" containerID="206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.744528 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32"} err="failed to get container status \"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32\": rpc error: code = NotFound desc = could not find container \"206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32\": container with ID starting with 206f158048f0f6c6b5c10bba20482f532dd977934224edc84b1c0e0df891df32 not found: ID does not exist" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.744569 4903 scope.go:117] "RemoveContainer" containerID="ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0" Nov 26 22:34:22 crc kubenswrapper[4903]: E1126 22:34:22.744973 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0\": container with ID starting with ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0 not found: ID does not exist" containerID="ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.745018 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0"} err="failed to get container status \"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0\": rpc error: code = NotFound desc = could not find container \"ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0\": container with ID starting with ee01ad42432944ffefb026034befb0ba5ed7bfa2c26aef1e5837c2a6543ad5b0 not found: ID does not exist" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.745050 4903 scope.go:117] "RemoveContainer" 
containerID="ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9" Nov 26 22:34:22 crc kubenswrapper[4903]: E1126 22:34:22.745472 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9\": container with ID starting with ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9 not found: ID does not exist" containerID="ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.745503 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9"} err="failed to get container status \"ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9\": rpc error: code = NotFound desc = could not find container \"ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9\": container with ID starting with ed4e61cff58cd4ced6a2354aa47b2f9eeceaa46bebf90e948b9ffd851fb13ee9 not found: ID does not exist" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.774985 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "419ef374-1487-4796-ab40-0171619f08ef" (UID: "419ef374-1487-4796-ab40-0171619f08ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.834010 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/419ef374-1487-4796-ab40-0171619f08ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.990231 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:34:22 crc kubenswrapper[4903]: I1126 22:34:22.998155 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2t5dn"] Nov 26 22:34:24 crc kubenswrapper[4903]: I1126 22:34:24.045189 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="419ef374-1487-4796-ab40-0171619f08ef" path="/var/lib/kubelet/pods/419ef374-1487-4796-ab40-0171619f08ef/volumes" Nov 26 22:34:24 crc kubenswrapper[4903]: I1126 22:34:24.492561 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:24 crc kubenswrapper[4903]: I1126 22:34:24.492981 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:24 crc kubenswrapper[4903]: I1126 22:34:24.571845 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:24 crc kubenswrapper[4903]: I1126 22:34:24.746504 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:26 crc kubenswrapper[4903]: I1126 22:34:26.086785 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:27 crc kubenswrapper[4903]: I1126 22:34:27.699077 4903 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-z9zc4" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="registry-server" containerID="cri-o://4337628323fe2089317ee7e01e71f8a52d313ed38b96ce04e29ed2cdf09ba7b7" gracePeriod=2 Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.711491 4903 generic.go:334] "Generic (PLEG): container finished" podID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerID="4337628323fe2089317ee7e01e71f8a52d313ed38b96ce04e29ed2cdf09ba7b7" exitCode=0 Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.711557 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerDied","Data":"4337628323fe2089317ee7e01e71f8a52d313ed38b96ce04e29ed2cdf09ba7b7"} Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.711596 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9zc4" event={"ID":"3d000e98-2eb6-41e9-943b-294ed7b2243b","Type":"ContainerDied","Data":"103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a"} Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.711616 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="103f9f342f5759789b60a341873ee4fa21ab913671d0ea2d1bca26ae402c759a" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.727189 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.851977 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content\") pod \"3d000e98-2eb6-41e9-943b-294ed7b2243b\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.852033 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvf8s\" (UniqueName: \"kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s\") pod \"3d000e98-2eb6-41e9-943b-294ed7b2243b\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.852178 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities\") pod \"3d000e98-2eb6-41e9-943b-294ed7b2243b\" (UID: \"3d000e98-2eb6-41e9-943b-294ed7b2243b\") " Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.853248 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities" (OuterVolumeSpecName: "utilities") pod "3d000e98-2eb6-41e9-943b-294ed7b2243b" (UID: "3d000e98-2eb6-41e9-943b-294ed7b2243b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.853435 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.859137 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s" (OuterVolumeSpecName: "kube-api-access-bvf8s") pod "3d000e98-2eb6-41e9-943b-294ed7b2243b" (UID: "3d000e98-2eb6-41e9-943b-294ed7b2243b"). InnerVolumeSpecName "kube-api-access-bvf8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.919125 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d000e98-2eb6-41e9-943b-294ed7b2243b" (UID: "3d000e98-2eb6-41e9-943b-294ed7b2243b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.955759 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d000e98-2eb6-41e9-943b-294ed7b2243b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:28 crc kubenswrapper[4903]: I1126 22:34:28.955812 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvf8s\" (UniqueName: \"kubernetes.io/projected/3d000e98-2eb6-41e9-943b-294ed7b2243b-kube-api-access-bvf8s\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:29 crc kubenswrapper[4903]: I1126 22:34:29.718918 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9zc4" Nov 26 22:34:29 crc kubenswrapper[4903]: I1126 22:34:29.760915 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:29 crc kubenswrapper[4903]: I1126 22:34:29.765224 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z9zc4"] Nov 26 22:34:30 crc kubenswrapper[4903]: I1126 22:34:30.046027 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" path="/var/lib/kubelet/pods/3d000e98-2eb6-41e9-943b-294ed7b2243b/volumes" Nov 26 22:34:31 crc kubenswrapper[4903]: I1126 22:34:31.981621 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:34:31 crc kubenswrapper[4903]: I1126 22:34:31.981726 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.606788 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-pdrks"] Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607198 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607227 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607244 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607253 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607266 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607275 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607293 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607300 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607319 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607326 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" 
containerName="extract-content" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607335 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607343 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607359 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607369 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607380 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607389 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.607400 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607408 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="extract-utilities" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607538 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d000e98-2eb6-41e9-943b-294ed7b2243b" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607562 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f960af9a-0dac-4f20-90b6-5bf7ee326804" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.607577 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="419ef374-1487-4796-ab40-0171619f08ef" containerName="registry-server" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.608185 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.610034 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.611020 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.611264 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.611483 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.620407 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-pdrks"] Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.624289 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-29bb5" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.640841 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.683021 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-pdrks"] Nov 26 22:34:32 crc kubenswrapper[4903]: E1126 22:34:32.683806 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-pcn64 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-pdrks" podUID="894a172f-c73b-4e58-913d-94fbb21e842e" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730304 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730343 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730406 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730535 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730571 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730592 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730645 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730743 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730780 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.730832 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcn64\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.746904 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.755276 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.832633 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.832709 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.832742 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.832794 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.832905 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833783 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833857 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833930 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833953 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: 
\"kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833954 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.833991 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.834094 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.834196 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.834238 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.834292 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcn64\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.837485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.838190 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.838272 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.838809 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.852631 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcn64\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.855258 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token\") pod \"collector-pdrks\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " pod="openshift-logging/collector-pdrks" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935388 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935518 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935650 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935737 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935820 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.935928 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.936766 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "entrypoint". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.936927 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config" (OuterVolumeSpecName: "config") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.937356 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.937421 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.937474 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.938081 4903 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-entrypoint\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.938137 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.938191 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir" (OuterVolumeSpecName: "datadir") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.938587 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.938630 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.939296 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics" (OuterVolumeSpecName: "metrics") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.940182 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token" (OuterVolumeSpecName: "collector-token") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.940888 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:34:32 crc kubenswrapper[4903]: I1126 22:34:32.942794 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp" (OuterVolumeSpecName: "tmp") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.039308 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcn64\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.039543 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token\") pod \"894a172f-c73b-4e58-913d-94fbb21e842e\" (UID: \"894a172f-c73b-4e58-913d-94fbb21e842e\") " Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040327 4903 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/894a172f-c73b-4e58-913d-94fbb21e842e-datadir\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040382 4903 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040401 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040429 4903 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/894a172f-c73b-4e58-913d-94fbb21e842e-config-openshift-service-cacrt\") on node \"crc\" 
DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040456 4903 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040481 4903 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/894a172f-c73b-4e58-913d-94fbb21e842e-collector-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.040503 4903 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/894a172f-c73b-4e58-913d-94fbb21e842e-tmp\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.043476 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64" (OuterVolumeSpecName: "kube-api-access-pcn64") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "kube-api-access-pcn64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.045091 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token" (OuterVolumeSpecName: "sa-token") pod "894a172f-c73b-4e58-913d-94fbb21e842e" (UID: "894a172f-c73b-4e58-913d-94fbb21e842e"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.142770 4903 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.142832 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcn64\" (UniqueName: \"kubernetes.io/projected/894a172f-c73b-4e58-913d-94fbb21e842e-kube-api-access-pcn64\") on node \"crc\" DevicePath \"\"" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.754146 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-pdrks" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.826820 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-pdrks"] Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.836644 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-pdrks"] Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.846424 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-xj985"] Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.848451 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.860727 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-29bb5" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.860888 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.861157 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.861449 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-xj985"] Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.861515 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.862943 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.868531 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.964158 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/49456ff3-4275-428d-84cc-25664a331100-tmp\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.964227 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.964274 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-token\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.965049 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd4j7\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-kube-api-access-qd4j7\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.965339 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/49456ff3-4275-428d-84cc-25664a331100-datadir\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.965546 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-syslog-receiver\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " 
pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.965778 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-entrypoint\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.966010 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config-openshift-service-cacrt\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.966185 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-sa-token\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.966375 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-trusted-ca\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:33 crc kubenswrapper[4903]: I1126 22:34:33.966612 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-metrics\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.042022 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="894a172f-c73b-4e58-913d-94fbb21e842e" path="/var/lib/kubelet/pods/894a172f-c73b-4e58-913d-94fbb21e842e/volumes" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068345 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-metrics\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068417 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/49456ff3-4275-428d-84cc-25664a331100-tmp\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068465 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068508 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-token\") pod 
\"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068558 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd4j7\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-kube-api-access-qd4j7\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068613 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/49456ff3-4275-428d-84cc-25664a331100-datadir\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068659 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-syslog-receiver\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068741 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-entrypoint\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068897 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config-openshift-service-cacrt\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068950 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-sa-token\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.069044 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-trusted-ca\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.068879 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/49456ff3-4275-428d-84cc-25664a331100-datadir\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.069762 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-entrypoint\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.070210 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.070359 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-config-openshift-service-cacrt\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.071741 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/49456ff3-4275-428d-84cc-25664a331100-trusted-ca\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.075198 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/49456ff3-4275-428d-84cc-25664a331100-tmp\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.076258 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-token\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.081515 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-collector-syslog-receiver\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.083224 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/49456ff3-4275-428d-84cc-25664a331100-metrics\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.099252 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd4j7\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-kube-api-access-qd4j7\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.101406 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/49456ff3-4275-428d-84cc-25664a331100-sa-token\") pod \"collector-xj985\" (UID: \"49456ff3-4275-428d-84cc-25664a331100\") " pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.189943 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-xj985" Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.731921 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-xj985"] Nov 26 22:34:34 crc kubenswrapper[4903]: I1126 22:34:34.766260 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-xj985" event={"ID":"49456ff3-4275-428d-84cc-25664a331100","Type":"ContainerStarted","Data":"5ed75833918fb8beee266b997b8c8e7060839e17ddd9c823dd2cf20b5c978621"} Nov 26 22:34:41 crc kubenswrapper[4903]: I1126 22:34:41.841044 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-xj985" event={"ID":"49456ff3-4275-428d-84cc-25664a331100","Type":"ContainerStarted","Data":"6b4fa3ae5c35008deea54c0fa10bc8fdb70c3c147270e793296f04f349114243"} Nov 26 22:34:41 crc kubenswrapper[4903]: I1126 22:34:41.882115 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-xj985" podStartSLOduration=2.156631019 podStartE2EDuration="8.882077339s" podCreationTimestamp="2025-11-26 22:34:33 +0000 UTC" firstStartedPulling="2025-11-26 22:34:34.741461849 +0000 UTC m=+803.431696789" lastFinishedPulling="2025-11-26 22:34:41.466908169 +0000 UTC m=+810.157143109" observedRunningTime="2025-11-26 22:34:41.873674132 +0000 UTC m=+810.563909042" watchObservedRunningTime="2025-11-26 22:34:41.882077339 +0000 UTC m=+810.572312299" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.752774 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.758837 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.767016 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.816268 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.816429 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx2vb\" (UniqueName: \"kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.816496 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.918573 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities\") pod \"redhat-marketplace-5rfl7\" (UID: 
\"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.918643 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx2vb\" (UniqueName: \"kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.918674 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.919324 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.919393 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:46 crc kubenswrapper[4903]: I1126 22:34:46.939936 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx2vb\" (UniqueName: \"kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb\") pod \"redhat-marketplace-5rfl7\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:47 crc kubenswrapper[4903]: I1126 22:34:47.096037 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:47 crc kubenswrapper[4903]: I1126 22:34:47.391621 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:34:47 crc kubenswrapper[4903]: I1126 22:34:47.894188 4903 generic.go:334] "Generic (PLEG): container finished" podID="918506a2-34a3-4b16-b534-801a81ba1585" containerID="46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681" exitCode=0 Nov 26 22:34:47 crc kubenswrapper[4903]: I1126 22:34:47.894278 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerDied","Data":"46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681"} Nov 26 22:34:47 crc kubenswrapper[4903]: I1126 22:34:47.896139 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerStarted","Data":"f474400cfa6920d451ca7adef50b0042fa4a7f736e1016b262dbb185f062b54f"} Nov 26 22:34:48 crc kubenswrapper[4903]: I1126 22:34:48.908519 4903 generic.go:334] "Generic (PLEG): container finished" podID="918506a2-34a3-4b16-b534-801a81ba1585" containerID="2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783" exitCode=0 Nov 26 22:34:48 crc kubenswrapper[4903]: I1126 22:34:48.908601 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerDied","Data":"2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783"} Nov 26 22:34:49 crc kubenswrapper[4903]: I1126 22:34:49.925388 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerStarted","Data":"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d"} Nov 26 22:34:49 crc kubenswrapper[4903]: I1126 22:34:49.955399 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5rfl7" podStartSLOduration=2.239859966 podStartE2EDuration="3.955367337s" podCreationTimestamp="2025-11-26 22:34:46 +0000 UTC" firstStartedPulling="2025-11-26 22:34:47.896492148 +0000 UTC m=+816.586727088" lastFinishedPulling="2025-11-26 22:34:49.611999519 +0000 UTC m=+818.302234459" observedRunningTime="2025-11-26 22:34:49.949797466 +0000 UTC m=+818.640032406" watchObservedRunningTime="2025-11-26 22:34:49.955367337 +0000 UTC m=+818.645602297" Nov 26 22:34:57 crc kubenswrapper[4903]: I1126 22:34:57.096763 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:57 crc kubenswrapper[4903]: I1126 22:34:57.097561 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:57 crc kubenswrapper[4903]: I1126 22:34:57.172037 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:58 crc kubenswrapper[4903]: I1126 22:34:58.060627 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:34:58 crc kubenswrapper[4903]: I1126 22:34:58.127348 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.013189 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5rfl7" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="registry-server" containerID="cri-o://74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d" gracePeriod=2 Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.515183 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.678465 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx2vb\" (UniqueName: \"kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb\") pod \"918506a2-34a3-4b16-b534-801a81ba1585\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.678894 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities\") pod \"918506a2-34a3-4b16-b534-801a81ba1585\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.679097 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content\") pod \"918506a2-34a3-4b16-b534-801a81ba1585\" (UID: \"918506a2-34a3-4b16-b534-801a81ba1585\") " Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.679632 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities" (OuterVolumeSpecName: "utilities") pod "918506a2-34a3-4b16-b534-801a81ba1585" (UID: "918506a2-34a3-4b16-b534-801a81ba1585"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.686277 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb" (OuterVolumeSpecName: "kube-api-access-vx2vb") pod "918506a2-34a3-4b16-b534-801a81ba1585" (UID: "918506a2-34a3-4b16-b534-801a81ba1585"). InnerVolumeSpecName "kube-api-access-vx2vb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.717385 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "918506a2-34a3-4b16-b534-801a81ba1585" (UID: "918506a2-34a3-4b16-b534-801a81ba1585"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.781224 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.781270 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/918506a2-34a3-4b16-b534-801a81ba1585-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:00 crc kubenswrapper[4903]: I1126 22:35:00.781292 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx2vb\" (UniqueName: \"kubernetes.io/projected/918506a2-34a3-4b16-b534-801a81ba1585-kube-api-access-vx2vb\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.027469 4903 generic.go:334] "Generic (PLEG): container finished" podID="918506a2-34a3-4b16-b534-801a81ba1585" containerID="74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d" exitCode=0 Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.027528 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerDied","Data":"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d"} Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.027560 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5rfl7" event={"ID":"918506a2-34a3-4b16-b534-801a81ba1585","Type":"ContainerDied","Data":"f474400cfa6920d451ca7adef50b0042fa4a7f736e1016b262dbb185f062b54f"} Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.027587 4903 scope.go:117] "RemoveContainer" containerID="74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.027780 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5rfl7" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.062654 4903 scope.go:117] "RemoveContainer" containerID="2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.086720 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.095828 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5rfl7"] Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.114284 4903 scope.go:117] "RemoveContainer" containerID="46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.141020 4903 scope.go:117] "RemoveContainer" containerID="74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d" Nov 26 22:35:01 crc kubenswrapper[4903]: E1126 22:35:01.141513 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d\": container with ID starting with 74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d not found: ID does not exist" containerID="74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.141569 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d"} err="failed to get container status \"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d\": rpc error: code = NotFound desc = could not find container \"74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d\": container with ID starting with 74f963fd5d178d1097144853b243af0a20dff31ee5cfab96fe947643af124a9d not found: ID does not exist" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.141606 4903 scope.go:117] "RemoveContainer" containerID="2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783" Nov 26 22:35:01 crc kubenswrapper[4903]: E1126 22:35:01.142145 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783\": container with ID starting with 2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783 not found: ID does not exist" containerID="2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.142194 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783"} err="failed to get container status \"2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783\": rpc error: code = NotFound desc = could not find container \"2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783\": container with ID starting with 2515697bae6ca8084531dca4c20b80e5c4c5e71a387c1bf0667ca853f35f0783 not found: ID does not exist" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.142262 4903 scope.go:117] "RemoveContainer" containerID="46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681" Nov 26 22:35:01 crc kubenswrapper[4903]: E1126 22:35:01.142979 4903 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681\": container with ID starting with 46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681 not found: ID does not exist" containerID="46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.143053 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681"} err="failed to get container status \"46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681\": rpc error: code = NotFound desc = could not find container \"46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681\": container with ID starting with 46151e472290b216f8994736bb59fbdc7f638fc18b2cdf6af31884376b53d681 not found: ID does not exist" Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.981426 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:35:01 crc kubenswrapper[4903]: I1126 22:35:01.981866 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:35:02 crc kubenswrapper[4903]: I1126 22:35:02.046662 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="918506a2-34a3-4b16-b534-801a81ba1585" path="/var/lib/kubelet/pods/918506a2-34a3-4b16-b534-801a81ba1585/volumes" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.828012 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8"] Nov 26 22:35:11 crc kubenswrapper[4903]: E1126 22:35:11.829660 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="extract-utilities" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.829759 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="extract-utilities" Nov 26 22:35:11 crc kubenswrapper[4903]: E1126 22:35:11.829821 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="extract-content" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.829888 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="extract-content" Nov 26 22:35:11 crc kubenswrapper[4903]: E1126 22:35:11.829953 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="registry-server" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.830027 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="registry-server" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.830212 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="918506a2-34a3-4b16-b534-801a81ba1585" containerName="registry-server" Nov 26 
22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.831182 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.833333 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.837579 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8"] Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.894457 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cswk2\" (UniqueName: \"kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.894507 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.894628 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.996120 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.996326 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cswk2\" (UniqueName: \"kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.996397 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.996593 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:11 crc kubenswrapper[4903]: I1126 22:35:11.997523 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:12 crc kubenswrapper[4903]: I1126 22:35:12.014428 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cswk2\" (UniqueName: \"kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:12 crc kubenswrapper[4903]: I1126 22:35:12.150621 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 22:35:12 crc kubenswrapper[4903]: I1126 22:35:12.159447 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:12 crc kubenswrapper[4903]: I1126 22:35:12.633753 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8"] Nov 26 22:35:13 crc kubenswrapper[4903]: I1126 22:35:13.136951 4903 generic.go:334] "Generic (PLEG): container finished" podID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerID="91fbf9ed84c3947059aa6c84be65885193bdf82f2116a7f64186112c164a7f85" exitCode=0 Nov 26 22:35:13 crc kubenswrapper[4903]: I1126 22:35:13.137056 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" event={"ID":"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f","Type":"ContainerDied","Data":"91fbf9ed84c3947059aa6c84be65885193bdf82f2116a7f64186112c164a7f85"} Nov 26 22:35:13 crc kubenswrapper[4903]: I1126 22:35:13.137159 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" event={"ID":"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f","Type":"ContainerStarted","Data":"4d303c76543266db512fa65fe4b8981fd10e60d3a9e49890b3121425c8c050a1"} Nov 26 22:35:15 crc kubenswrapper[4903]: I1126 22:35:15.159279 4903 generic.go:334] "Generic (PLEG): container finished" podID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerID="b32a5c1d687f58f4a5534b48bdd817d5deba11380686d6ca48166bfee3eab83b" exitCode=0 Nov 26 22:35:15 crc kubenswrapper[4903]: I1126 22:35:15.159439 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" event={"ID":"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f","Type":"ContainerDied","Data":"b32a5c1d687f58f4a5534b48bdd817d5deba11380686d6ca48166bfee3eab83b"} Nov 26 22:35:16 crc kubenswrapper[4903]: I1126 22:35:16.175882 4903 
generic.go:334] "Generic (PLEG): container finished" podID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerID="2b43c320ab882d1ceed50c1b8d3d31f36d766a8c7af0237fb6e4288a3ac6b9fa" exitCode=0 Nov 26 22:35:16 crc kubenswrapper[4903]: I1126 22:35:16.175943 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" event={"ID":"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f","Type":"ContainerDied","Data":"2b43c320ab882d1ceed50c1b8d3d31f36d766a8c7af0237fb6e4288a3ac6b9fa"} Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.557241 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.703400 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle\") pod \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.703992 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util\") pod \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.704039 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cswk2\" (UniqueName: \"kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2\") pod \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\" (UID: \"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f\") " Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.704645 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle" (OuterVolumeSpecName: "bundle") pod "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" (UID: "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.713044 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2" (OuterVolumeSpecName: "kube-api-access-cswk2") pod "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" (UID: "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f"). InnerVolumeSpecName "kube-api-access-cswk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.725488 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util" (OuterVolumeSpecName: "util") pod "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" (UID: "7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.806939 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.807012 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:17 crc kubenswrapper[4903]: I1126 22:35:17.807039 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cswk2\" (UniqueName: \"kubernetes.io/projected/7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f-kube-api-access-cswk2\") on node \"crc\" DevicePath \"\"" Nov 26 22:35:18 crc kubenswrapper[4903]: I1126 22:35:18.194772 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" event={"ID":"7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f","Type":"ContainerDied","Data":"4d303c76543266db512fa65fe4b8981fd10e60d3a9e49890b3121425c8c050a1"} Nov 26 22:35:18 crc kubenswrapper[4903]: I1126 22:35:18.194815 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d303c76543266db512fa65fe4b8981fd10e60d3a9e49890b3121425c8c050a1" Nov 26 22:35:18 crc kubenswrapper[4903]: I1126 22:35:18.194836 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.208292 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 26 22:35:22 crc kubenswrapper[4903]: E1126 22:35:22.209199 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="extract" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.209220 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="extract" Nov 26 22:35:22 crc kubenswrapper[4903]: E1126 22:35:22.209259 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="pull" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.209272 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="pull" Nov 26 22:35:22 crc kubenswrapper[4903]: E1126 22:35:22.209291 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="util" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.209303 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="util" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.209527 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f" containerName="extract" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.210406 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.219742 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-kr9b7" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.219909 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.227086 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.243142 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.395041 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zplnf\" (UniqueName: \"kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf\") pod \"nmstate-operator-557fdffb88-btnn5\" (UID: \"f0eef496-9727-4ee7-9c31-c2afcb9303c6\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.496068 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zplnf\" (UniqueName: \"kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf\") pod \"nmstate-operator-557fdffb88-btnn5\" (UID: \"f0eef496-9727-4ee7-9c31-c2afcb9303c6\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.525573 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zplnf\" (UniqueName: \"kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf\") pod \"nmstate-operator-557fdffb88-btnn5\" (UID: \"f0eef496-9727-4ee7-9c31-c2afcb9303c6\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.568811 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 26 22:35:22 crc kubenswrapper[4903]: I1126 22:35:22.850114 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 26 22:35:23 crc kubenswrapper[4903]: I1126 22:35:23.248409 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" event={"ID":"f0eef496-9727-4ee7-9c31-c2afcb9303c6","Type":"ContainerStarted","Data":"b5e964ef175c729336898674e21f35bbba4e972bb274657ebb64b55e378bd68c"} Nov 26 22:35:26 crc kubenswrapper[4903]: I1126 22:35:26.271379 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" event={"ID":"f0eef496-9727-4ee7-9c31-c2afcb9303c6","Type":"ContainerStarted","Data":"fd1af25c5bae5d52241e330c0ae766ae2639da3bfa1be5e19900efc2a932d7d7"} Nov 26 22:35:26 crc kubenswrapper[4903]: I1126 22:35:26.295439 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" podStartSLOduration=1.603363895 podStartE2EDuration="4.29541707s" podCreationTimestamp="2025-11-26 22:35:22 +0000 UTC" firstStartedPulling="2025-11-26 22:35:22.861385325 +0000 UTC m=+851.551620235" lastFinishedPulling="2025-11-26 22:35:25.5534385 +0000 UTC m=+854.243673410" observedRunningTime="2025-11-26 22:35:26.288362492 +0000 UTC m=+854.978597412" watchObservedRunningTime="2025-11-26 22:35:26.29541707 +0000 UTC m=+854.985652010" Nov 26 22:35:31 crc kubenswrapper[4903]: I1126 22:35:31.980954 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:35:31 crc kubenswrapper[4903]: I1126 22:35:31.981748 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:35:31 crc kubenswrapper[4903]: I1126 22:35:31.981820 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:35:31 crc kubenswrapper[4903]: I1126 22:35:31.982847 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:35:31 crc kubenswrapper[4903]: I1126 22:35:31.982947 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1" gracePeriod=600 Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.324662 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" 
containerID="f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1" exitCode=0 Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.324966 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1"} Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.325003 4903 scope.go:117] "RemoveContainer" containerID="2aaaca3e91746b5d4a5250d027be0943958bdff764bee382a723226bf824aef4" Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.954856 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.956277 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 26 22:35:32 crc kubenswrapper[4903]: W1126 22:35:32.958181 4903 reflector.go:561] object-"openshift-nmstate"/"nmstate-handler-dockercfg-lvs9v": failed to list *v1.Secret: secrets "nmstate-handler-dockercfg-lvs9v" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-nmstate": no relationship found between node 'crc' and this object Nov 26 22:35:32 crc kubenswrapper[4903]: E1126 22:35:32.958230 4903 reflector.go:158] "Unhandled Error" err="object-\"openshift-nmstate\"/\"nmstate-handler-dockercfg-lvs9v\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"nmstate-handler-dockercfg-lvs9v\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-nmstate\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.969494 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.970348 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.971762 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.973308 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.981760 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.999038 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-v5kzk"] Nov 26 22:35:32 crc kubenswrapper[4903]: I1126 22:35:32.999853 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.074537 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.074605 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.074910 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.075141 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9jsq\" (UniqueName: \"kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.075271 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.075317 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-774s6\" (UniqueName: \"kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6\") pod \"nmstate-metrics-5dcf9c57c5-ctqlb\" (UID: \"500a4a0f-2474-482b-9f47-7304d9bd35e9\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.075419 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7rcg\" (UniqueName: \"kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.107685 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.108582 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.111972 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.112225 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-d6t7w" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.112404 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.118826 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.176901 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7rcg\" (UniqueName: \"kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.176958 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.176996 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxpj2\" (UniqueName: \"kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177026 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177093 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177111 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177122 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9jsq\" (UniqueName: \"kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " 
pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177255 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177296 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177320 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-774s6\" (UniqueName: \"kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6\") pod \"nmstate-metrics-5dcf9c57c5-ctqlb\" (UID: \"500a4a0f-2474-482b-9f47-7304d9bd35e9\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177322 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: E1126 22:35:33.177353 4903 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177404 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: E1126 22:35:33.177419 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair podName:8ebd57a0-155f-481c-9d2e-11c69e14b6fc nodeName:}" failed. No retries permitted until 2025-11-26 22:35:33.677400063 +0000 UTC m=+862.367634973 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair") pod "nmstate-webhook-6b89b748d8-9rhdl" (UID: "8ebd57a0-155f-481c-9d2e-11c69e14b6fc") : secret "openshift-nmstate-webhook" not found Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.177919 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.194675 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-774s6\" (UniqueName: \"kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6\") pod \"nmstate-metrics-5dcf9c57c5-ctqlb\" (UID: \"500a4a0f-2474-482b-9f47-7304d9bd35e9\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.194794 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7rcg\" (UniqueName: \"kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.205639 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9jsq\" (UniqueName: \"kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq\") pod \"nmstate-handler-v5kzk\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.279046 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.279091 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.279155 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxpj2\" (UniqueName: \"kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: E1126 22:35:33.279239 4903 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 26 22:35:33 crc kubenswrapper[4903]: E1126 22:35:33.279315 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert 
podName:872167df-4435-42c4-9503-8bfca809574f nodeName:}" failed. No retries permitted until 2025-11-26 22:35:33.779294554 +0000 UTC m=+862.469529464 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-hpzbl" (UID: "872167df-4435-42c4-9503-8bfca809574f") : secret "plugin-serving-cert" not found Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.279979 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.295480 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxpj2\" (UniqueName: \"kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.299933 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"] Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.300783 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.314976 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"] Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.337044 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165"} Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.380970 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381051 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381085 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381266 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381325 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtr45\" (UniqueName: \"kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.381435 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483212 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483234 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483267 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483333 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483352 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.483371 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtr45\" (UniqueName: \"kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.484876 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.484965 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.485183 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.485344 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.486554 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.491272 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.505459 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtr45\" (UniqueName: \"kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45\") pod \"console-d87bcd8c7-tgv7l\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.641455 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.687819 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.693999 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-9rhdl\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.789113 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:33 crc kubenswrapper[4903]: I1126 22:35:33.799683 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-hpzbl\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.026552 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.108759 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"] Nov 26 22:35:34 crc kubenswrapper[4903]: W1126 22:35:34.123044 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ef46889_0257_408b_8f72_b8985eacb494.slice/crio-502c3d7b509a8c583657b0268116a466c703672f8e8e8f34b3e30d5eef939fd4 WatchSource:0}: Error finding container 502c3d7b509a8c583657b0268116a466c703672f8e8e8f34b3e30d5eef939fd4: Status 404 returned error can't find the container with id 502c3d7b509a8c583657b0268116a466c703672f8e8e8f34b3e30d5eef939fd4 Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.275613 4903 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" secret="" err="failed to sync secret cache: timed out waiting for the condition" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.275665 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.329778 4903 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-nmstate/nmstate-handler-v5kzk" secret="" err="failed to sync secret cache: timed out waiting for the condition" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.329830 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.350312 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d87bcd8c7-tgv7l" event={"ID":"3ef46889-0257-408b-8f72-b8985eacb494","Type":"ContainerStarted","Data":"017852a728600949ccb133ee9540cfa26374139c03a9d09f828ca85584962622"} Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.350356 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d87bcd8c7-tgv7l" event={"ID":"3ef46889-0257-408b-8f72-b8985eacb494","Type":"ContainerStarted","Data":"502c3d7b509a8c583657b0268116a466c703672f8e8e8f34b3e30d5eef939fd4"} Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.379198 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-d87bcd8c7-tgv7l" podStartSLOduration=1.37918113 podStartE2EDuration="1.37918113s" podCreationTimestamp="2025-11-26 22:35:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:35:34.37432638 +0000 UTC m=+863.064561290" watchObservedRunningTime="2025-11-26 22:35:34.37918113 +0000 UTC m=+863.069416040" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.448806 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-lvs9v" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.456083 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.504608 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.681085 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 26 22:35:34 crc kubenswrapper[4903]: W1126 22:35:34.700554 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ebd57a0_155f_481c_9d2e_11c69e14b6fc.slice/crio-fc7f99c075d7a73d11f2569401320bb890c34f079bdeacaf302625280477252c WatchSource:0}: Error finding container fc7f99c075d7a73d11f2569401320bb890c34f079bdeacaf302625280477252c: Status 404 returned error can't find the container with id fc7f99c075d7a73d11f2569401320bb890c34f079bdeacaf302625280477252c Nov 26 22:35:34 crc kubenswrapper[4903]: I1126 22:35:34.713759 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 26 22:35:35 crc kubenswrapper[4903]: I1126 22:35:35.358768 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" event={"ID":"8ebd57a0-155f-481c-9d2e-11c69e14b6fc","Type":"ContainerStarted","Data":"fc7f99c075d7a73d11f2569401320bb890c34f079bdeacaf302625280477252c"} Nov 26 22:35:35 crc kubenswrapper[4903]: I1126 22:35:35.360129 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerStarted","Data":"1fe52479eb44490553a277291b7fb7d8545eefaaf173de5d58d173e551720db2"} Nov 26 22:35:35 crc kubenswrapper[4903]: I1126 22:35:35.361847 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" event={"ID":"872167df-4435-42c4-9503-8bfca809574f","Type":"ContainerStarted","Data":"72888784a4e62f0f0001cb3dcf0f7b777c2a9c04d252f50e8c9abc09eb7934df"} Nov 26 22:35:35 crc kubenswrapper[4903]: I1126 22:35:35.363248 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-v5kzk" event={"ID":"097195ec-5a3f-4d57-b864-264165398ff6","Type":"ContainerStarted","Data":"1c367868ac962e8b251196a3ead09e84ad156747dba785ed829e0ab847e85bbf"} Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.396344 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" event={"ID":"872167df-4435-42c4-9503-8bfca809574f","Type":"ContainerStarted","Data":"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db"} Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.398397 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-v5kzk" event={"ID":"097195ec-5a3f-4d57-b864-264165398ff6","Type":"ContainerStarted","Data":"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70"} Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.399610 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.402519 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" event={"ID":"8ebd57a0-155f-481c-9d2e-11c69e14b6fc","Type":"ContainerStarted","Data":"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358"} Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.402812 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.404491 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerStarted","Data":"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d"} Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.419639 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" podStartSLOduration=2.40917322 podStartE2EDuration="6.419615216s" podCreationTimestamp="2025-11-26 22:35:33 +0000 UTC" firstStartedPulling="2025-11-26 22:35:34.51440176 +0000 UTC m=+863.204636670" lastFinishedPulling="2025-11-26 22:35:38.524843716 +0000 UTC m=+867.215078666" observedRunningTime="2025-11-26 22:35:39.41527772 +0000 UTC m=+868.105512670" watchObservedRunningTime="2025-11-26 22:35:39.419615216 +0000 UTC m=+868.109850136" Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.464866 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" podStartSLOduration=3.633445618 podStartE2EDuration="7.464684509s" podCreationTimestamp="2025-11-26 22:35:32 +0000 UTC" firstStartedPulling="2025-11-26 22:35:34.702930094 +0000 UTC m=+863.393165004" lastFinishedPulling="2025-11-26 22:35:38.534168935 +0000 UTC m=+867.224403895" observedRunningTime="2025-11-26 22:35:39.453567622 +0000 UTC m=+868.143802552" watchObservedRunningTime="2025-11-26 22:35:39.464684509 +0000 UTC m=+868.154919429" Nov 26 22:35:39 crc kubenswrapper[4903]: I1126 22:35:39.500587 4903 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-v5kzk" podStartSLOduration=3.308508502 podStartE2EDuration="7.500565406s" podCreationTimestamp="2025-11-26 22:35:32 +0000 UTC" firstStartedPulling="2025-11-26 22:35:34.359215387 +0000 UTC m=+863.049450297" lastFinishedPulling="2025-11-26 22:35:38.551272251 +0000 UTC m=+867.241507201" observedRunningTime="2025-11-26 22:35:39.495075311 +0000 UTC m=+868.185310231" watchObservedRunningTime="2025-11-26 22:35:39.500565406 +0000 UTC m=+868.190800326" Nov 26 22:35:42 crc kubenswrapper[4903]: I1126 22:35:42.701834 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerStarted","Data":"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d"} Nov 26 22:35:42 crc kubenswrapper[4903]: I1126 22:35:42.737751 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" podStartSLOduration=3.48333586 podStartE2EDuration="10.737721346s" podCreationTimestamp="2025-11-26 22:35:32 +0000 UTC" firstStartedPulling="2025-11-26 22:35:34.719203369 +0000 UTC m=+863.409438279" lastFinishedPulling="2025-11-26 22:35:41.973588855 +0000 UTC m=+870.663823765" observedRunningTime="2025-11-26 22:35:42.724936214 +0000 UTC m=+871.415171185" watchObservedRunningTime="2025-11-26 22:35:42.737721346 +0000 UTC m=+871.427956286" Nov 26 22:35:43 crc kubenswrapper[4903]: I1126 22:35:43.641988 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:43 crc kubenswrapper[4903]: I1126 22:35:43.642058 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:43 crc kubenswrapper[4903]: I1126 22:35:43.649999 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:43 crc kubenswrapper[4903]: I1126 22:35:43.717342 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:35:43 crc kubenswrapper[4903]: I1126 22:35:43.801027 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:35:44 crc kubenswrapper[4903]: I1126 22:35:44.370792 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 26 22:35:54 crc kubenswrapper[4903]: I1126 22:35:54.466234 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 26 22:36:08 crc kubenswrapper[4903]: I1126 22:36:08.867570 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5dc67597bd-6lscg" podUID="1c981989-99f6-4954-b225-9182997e82e0" containerName="console" containerID="cri-o://c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a" gracePeriod=15 Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.367574 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5dc67597bd-6lscg_1c981989-99f6-4954-b225-9182997e82e0/console/0.log" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.367874 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.480930 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481214 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481369 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481404 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481452 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxnkj\" (UniqueName: \"kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481543 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.481564 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle\") pod \"1c981989-99f6-4954-b225-9182997e82e0\" (UID: \"1c981989-99f6-4954-b225-9182997e82e0\") " Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.482037 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config" (OuterVolumeSpecName: "console-config") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.482360 4903 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.483092 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.483874 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca" (OuterVolumeSpecName: "service-ca") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.484281 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.487888 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.489323 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj" (OuterVolumeSpecName: "kube-api-access-qxnkj") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "kube-api-access-qxnkj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.490834 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1c981989-99f6-4954-b225-9182997e82e0" (UID: "1c981989-99f6-4954-b225-9182997e82e0"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583563 4903 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583810 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583819 4903 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583831 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1c981989-99f6-4954-b225-9182997e82e0-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583879 4903 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1c981989-99f6-4954-b225-9182997e82e0-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.583889 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxnkj\" (UniqueName: \"kubernetes.io/projected/1c981989-99f6-4954-b225-9182997e82e0-kube-api-access-qxnkj\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.956950 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5dc67597bd-6lscg_1c981989-99f6-4954-b225-9182997e82e0/console/0.log" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.957024 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c981989-99f6-4954-b225-9182997e82e0" containerID="c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a" exitCode=2 Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.957067 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dc67597bd-6lscg" event={"ID":"1c981989-99f6-4954-b225-9182997e82e0","Type":"ContainerDied","Data":"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a"} Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.957105 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5dc67597bd-6lscg" event={"ID":"1c981989-99f6-4954-b225-9182997e82e0","Type":"ContainerDied","Data":"5534bedf413c681f318678398be23daccf0dec6d0c6d74f61cf9bb3b27a51c4a"} Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.957134 4903 scope.go:117] "RemoveContainer" containerID="c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a" Nov 26 22:36:09 crc kubenswrapper[4903]: I1126 22:36:09.957344 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5dc67597bd-6lscg" Nov 26 22:36:10 crc kubenswrapper[4903]: I1126 22:36:10.044752 4903 scope.go:117] "RemoveContainer" containerID="c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a" Nov 26 22:36:10 crc kubenswrapper[4903]: E1126 22:36:10.064937 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a\": container with ID starting with c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a not found: ID does not exist" containerID="c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a" Nov 26 22:36:10 crc kubenswrapper[4903]: I1126 22:36:10.064995 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a"} err="failed to get container status \"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a\": rpc error: code = NotFound desc = could not find container \"c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a\": container with ID starting with c7b46bca3fcef3916c357064d13466bc6fc2f2354a12fa2497e25e829db3582a not found: ID does not exist" Nov 26 22:36:10 crc kubenswrapper[4903]: I1126 22:36:10.080374 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:36:10 crc kubenswrapper[4903]: I1126 22:36:10.081444 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5dc67597bd-6lscg"] Nov 26 22:36:12 crc kubenswrapper[4903]: I1126 22:36:12.042779 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c981989-99f6-4954-b225-9182997e82e0" path="/var/lib/kubelet/pods/1c981989-99f6-4954-b225-9182997e82e0/volumes" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.746743 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf"] Nov 26 22:36:15 crc kubenswrapper[4903]: E1126 22:36:15.747680 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c981989-99f6-4954-b225-9182997e82e0" containerName="console" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.747723 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c981989-99f6-4954-b225-9182997e82e0" containerName="console" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.747976 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c981989-99f6-4954-b225-9182997e82e0" containerName="console" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.749678 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.752902 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.761783 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf"] Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.801886 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.802127 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcvls\" (UniqueName: \"kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.802315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.903628 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.903832 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcvls\" (UniqueName: \"kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.903895 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.904380 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.904607 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:15 crc kubenswrapper[4903]: I1126 22:36:15.938436 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcvls\" (UniqueName: \"kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:16 crc kubenswrapper[4903]: I1126 22:36:16.074927 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:16 crc kubenswrapper[4903]: I1126 22:36:16.605141 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf"] Nov 26 22:36:17 crc kubenswrapper[4903]: I1126 22:36:17.012537 4903 generic.go:334] "Generic (PLEG): container finished" podID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerID="71a45f9a0f0492c41c34a9738b77d2e3dbe9a29ddce365a04aad39439ef4f93f" exitCode=0 Nov 26 22:36:17 crc kubenswrapper[4903]: I1126 22:36:17.012604 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" event={"ID":"0e3ab50d-4ada-420a-b14c-3bcd20623c58","Type":"ContainerDied","Data":"71a45f9a0f0492c41c34a9738b77d2e3dbe9a29ddce365a04aad39439ef4f93f"} Nov 26 22:36:17 crc kubenswrapper[4903]: I1126 22:36:17.012868 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" event={"ID":"0e3ab50d-4ada-420a-b14c-3bcd20623c58","Type":"ContainerStarted","Data":"a2a5608b4a0bbfdf776c2287619b5db546c523c93bdcb73aee92cd8c8bcdb4a3"} Nov 26 22:36:17 crc kubenswrapper[4903]: I1126 22:36:17.015652 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 22:36:19 crc kubenswrapper[4903]: I1126 22:36:19.031041 4903 generic.go:334] "Generic (PLEG): container finished" podID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerID="246c6a8b5c028e9cc17a1cb5dac9f65cf31bd42bad5bed0688419b913eec1569" exitCode=0 Nov 26 22:36:19 crc kubenswrapper[4903]: I1126 22:36:19.031128 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" event={"ID":"0e3ab50d-4ada-420a-b14c-3bcd20623c58","Type":"ContainerDied","Data":"246c6a8b5c028e9cc17a1cb5dac9f65cf31bd42bad5bed0688419b913eec1569"} Nov 26 22:36:20 crc kubenswrapper[4903]: I1126 22:36:20.096370 4903 generic.go:334] "Generic (PLEG): container finished" 
podID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerID="108251504da93c897abc50b09c9590a8baccacec2266a1b86f6a4b1597a38cf9" exitCode=0 Nov 26 22:36:20 crc kubenswrapper[4903]: I1126 22:36:20.096480 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" event={"ID":"0e3ab50d-4ada-420a-b14c-3bcd20623c58","Type":"ContainerDied","Data":"108251504da93c897abc50b09c9590a8baccacec2266a1b86f6a4b1597a38cf9"} Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.539862 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.638133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle\") pod \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.638236 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util\") pod \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.638300 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcvls\" (UniqueName: \"kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls\") pod \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\" (UID: \"0e3ab50d-4ada-420a-b14c-3bcd20623c58\") " Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.639595 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle" (OuterVolumeSpecName: "bundle") pod "0e3ab50d-4ada-420a-b14c-3bcd20623c58" (UID: "0e3ab50d-4ada-420a-b14c-3bcd20623c58"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.648006 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls" (OuterVolumeSpecName: "kube-api-access-xcvls") pod "0e3ab50d-4ada-420a-b14c-3bcd20623c58" (UID: "0e3ab50d-4ada-420a-b14c-3bcd20623c58"). InnerVolumeSpecName "kube-api-access-xcvls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.678408 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util" (OuterVolumeSpecName: "util") pod "0e3ab50d-4ada-420a-b14c-3bcd20623c58" (UID: "0e3ab50d-4ada-420a-b14c-3bcd20623c58"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.739826 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.739856 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0e3ab50d-4ada-420a-b14c-3bcd20623c58-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:21 crc kubenswrapper[4903]: I1126 22:36:21.739866 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcvls\" (UniqueName: \"kubernetes.io/projected/0e3ab50d-4ada-420a-b14c-3bcd20623c58-kube-api-access-xcvls\") on node \"crc\" DevicePath \"\"" Nov 26 22:36:22 crc kubenswrapper[4903]: I1126 22:36:22.117228 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" event={"ID":"0e3ab50d-4ada-420a-b14c-3bcd20623c58","Type":"ContainerDied","Data":"a2a5608b4a0bbfdf776c2287619b5db546c523c93bdcb73aee92cd8c8bcdb4a3"} Nov 26 22:36:22 crc kubenswrapper[4903]: I1126 22:36:22.117288 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2a5608b4a0bbfdf776c2287619b5db546c523c93bdcb73aee92cd8c8bcdb4a3" Nov 26 22:36:22 crc kubenswrapper[4903]: I1126 22:36:22.117329 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.174380 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 26 22:36:31 crc kubenswrapper[4903]: E1126 22:36:31.175956 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="pull" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.176048 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="pull" Nov 26 22:36:31 crc kubenswrapper[4903]: E1126 22:36:31.176111 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="util" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.176165 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="util" Nov 26 22:36:31 crc kubenswrapper[4903]: E1126 22:36:31.176222 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="extract" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.176277 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="extract" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.176453 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e3ab50d-4ada-420a-b14c-3bcd20623c58" containerName="extract" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.177004 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.179260 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.179324 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.181092 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.181137 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-xdhsn" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.187963 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.204720 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.299769 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.299817 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.299911 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtqhg\" (UniqueName: \"kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.401727 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtqhg\" (UniqueName: \"kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.401840 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.401885 
4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.408902 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.409254 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.422365 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtqhg\" (UniqueName: \"kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg\") pod \"metallb-operator-controller-manager-57594f7c4c-gdzqb\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.442324 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9"] Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.443172 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.446758 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wn999" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.446790 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.447633 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.492140 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.497045 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9"] Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.505985 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.506078 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45zff\" (UniqueName: \"kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.506113 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.607868 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.607978 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45zff\" (UniqueName: \"kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.608028 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.612022 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.620469 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.638379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45zff\" (UniqueName: \"kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff\") pod \"metallb-operator-webhook-server-d76ff59f5-8bdc9\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.770797 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:31 crc kubenswrapper[4903]: I1126 22:36:31.965939 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 26 22:36:32 crc kubenswrapper[4903]: I1126 22:36:32.202553 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerStarted","Data":"035c72852c6a323f0f57805549b0d7d924b7db9e5c36cc483f667179d57dbb73"} Nov 26 22:36:32 crc kubenswrapper[4903]: I1126 22:36:32.236168 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9"] Nov 26 22:36:33 crc kubenswrapper[4903]: I1126 22:36:33.210246 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" event={"ID":"1c731f8b-9333-4076-b193-54255a31e938","Type":"ContainerStarted","Data":"40514c62cb19c215d96ecb814476156c38c1efd05920e94cf42c1868e8ee8cb6"} Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.260137 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerStarted","Data":"c77f86cbf5009902eeebfa382a21dc318a865c783305069fa931fe705dbfda82"} Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.260834 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.262285 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" event={"ID":"1c731f8b-9333-4076-b193-54255a31e938","Type":"ContainerStarted","Data":"3eab262027c4c654102e5726929d85c95b01615b65d801ad2c3c92b32fb12cb4"} Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.262432 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.286375 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" podStartSLOduration=2.227837617 podStartE2EDuration="7.286352966s" podCreationTimestamp="2025-11-26 22:36:31 +0000 UTC" firstStartedPulling="2025-11-26 22:36:31.981722474 +0000 UTC m=+920.671957384" lastFinishedPulling="2025-11-26 22:36:37.040237823 +0000 UTC m=+925.730472733" 
observedRunningTime="2025-11-26 22:36:38.281945409 +0000 UTC m=+926.972180339" watchObservedRunningTime="2025-11-26 22:36:38.286352966 +0000 UTC m=+926.976587886" Nov 26 22:36:38 crc kubenswrapper[4903]: I1126 22:36:38.308670 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" podStartSLOduration=2.486461874 podStartE2EDuration="7.308654528s" podCreationTimestamp="2025-11-26 22:36:31 +0000 UTC" firstStartedPulling="2025-11-26 22:36:32.244702666 +0000 UTC m=+920.934937576" lastFinishedPulling="2025-11-26 22:36:37.06689529 +0000 UTC m=+925.757130230" observedRunningTime="2025-11-26 22:36:38.308246338 +0000 UTC m=+926.998481268" watchObservedRunningTime="2025-11-26 22:36:38.308654528 +0000 UTC m=+926.998889438" Nov 26 22:36:51 crc kubenswrapper[4903]: I1126 22:36:51.777826 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 26 22:37:11 crc kubenswrapper[4903]: I1126 22:37:11.495908 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.270561 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qxrcx"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.276131 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.277064 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.277324 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.280435 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.280676 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.280834 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-t9wfd" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.281068 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.313981 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.371702 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-f2g89"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.373137 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.375060 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.375935 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-b97hq" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.376226 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.385030 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-hw6vb"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.386362 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.387343 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.389735 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394204 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394240 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394260 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394288 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5bhh\" (UniqueName: \"kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394344 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " 
pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394399 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394471 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtm9l\" (UniqueName: \"kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.394516 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.397511 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hw6vb"] Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.495943 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.495982 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496007 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496061 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtm9l\" (UniqueName: \"kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496096 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496123 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496148 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496165 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496183 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496199 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496237 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5bhh\" (UniqueName: \"kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496256 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdllz\" (UniqueName: \"kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496276 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496299 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48h6x\" (UniqueName: \"kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " 
pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496763 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.496979 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.497321 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.497737 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.498038 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.501468 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.515305 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.544163 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtm9l\" (UniqueName: \"kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l\") pod \"frr-k8s-webhook-server-6998585d5-s9t6m\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.544646 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5bhh\" (UniqueName: 
\"kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh\") pod \"frr-k8s-qxrcx\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604535 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604591 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604620 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604660 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdllz\" (UniqueName: \"kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604701 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48h6x\" (UniqueName: \"kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604718 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.604739 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: E1126 22:37:12.604890 4903 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 22:37:12 crc kubenswrapper[4903]: E1126 22:37:12.604940 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist podName:61e82f3d-2aca-46e7-bd0f-12c8b492c14e nodeName:}" failed. No retries permitted until 2025-11-26 22:37:13.104922449 +0000 UTC m=+961.795157359 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist") pod "speaker-f2g89" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e") : secret "metallb-memberlist" not found Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.610317 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.614060 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.614299 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.616963 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.625074 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.630787 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.633480 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdllz\" (UniqueName: \"kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.638379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48h6x\" (UniqueName: \"kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.646088 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert\") pod \"controller-6c7b4b5f48-hw6vb\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:12 crc kubenswrapper[4903]: I1126 22:37:12.704815 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.072900 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 26 22:37:13 crc kubenswrapper[4903]: W1126 22:37:13.082575 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5317f83c_9fcf_4df1_9823_bb92767545a9.slice/crio-516b00221c2783a43ca6d366a243e6d12331651249845a92a48f8b1ead47255d WatchSource:0}: Error finding container 516b00221c2783a43ca6d366a243e6d12331651249845a92a48f8b1ead47255d: Status 404 returned error can't find the container with id 516b00221c2783a43ca6d366a243e6d12331651249845a92a48f8b1ead47255d Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.119149 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:13 crc kubenswrapper[4903]: E1126 22:37:13.119328 4903 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 22:37:13 crc kubenswrapper[4903]: E1126 22:37:13.119407 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist podName:61e82f3d-2aca-46e7-bd0f-12c8b492c14e nodeName:}" failed. No retries permitted until 2025-11-26 22:37:14.119391097 +0000 UTC m=+962.809625997 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist") pod "speaker-f2g89" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e") : secret "metallb-memberlist" not found Nov 26 22:37:13 crc kubenswrapper[4903]: W1126 22:37:13.171644 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9e12d32_ef72_446c_b317_8d00a90a651b.slice/crio-ee23e574e8127d561a627194e52d7c6fc6367e8565da86e4a7fe44ce29fa39e7 WatchSource:0}: Error finding container ee23e574e8127d561a627194e52d7c6fc6367e8565da86e4a7fe44ce29fa39e7: Status 404 returned error can't find the container with id ee23e574e8127d561a627194e52d7c6fc6367e8565da86e4a7fe44ce29fa39e7 Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.176076 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hw6vb"] Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.624368 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hw6vb" event={"ID":"a9e12d32-ef72-446c-b317-8d00a90a651b","Type":"ContainerStarted","Data":"7e051cbc5ac3d1c70b3a70aebdccc89c6dcd22b494cee3a4023bee6490ebd082"} Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.624745 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hw6vb" event={"ID":"a9e12d32-ef72-446c-b317-8d00a90a651b","Type":"ContainerStarted","Data":"03af32a426adce86fe49bf7537e8f8c458325eb493f6e86b8aff9d514beb27a1"} Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.624767 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.624779 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hw6vb" event={"ID":"a9e12d32-ef72-446c-b317-8d00a90a651b","Type":"ContainerStarted","Data":"ee23e574e8127d561a627194e52d7c6fc6367e8565da86e4a7fe44ce29fa39e7"} Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.625934 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"bb0204fdf971a110ca9bf79338cd8af8496a33ac5e570874ac6169434c2165b9"} Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.627108 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" event={"ID":"5317f83c-9fcf-4df1-9823-bb92767545a9","Type":"ContainerStarted","Data":"516b00221c2783a43ca6d366a243e6d12331651249845a92a48f8b1ead47255d"} Nov 26 22:37:13 crc kubenswrapper[4903]: I1126 22:37:13.650800 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-hw6vb" podStartSLOduration=1.6507794850000002 podStartE2EDuration="1.650779485s" podCreationTimestamp="2025-11-26 22:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:37:13.64792567 +0000 UTC m=+962.338160590" watchObservedRunningTime="2025-11-26 22:37:13.650779485 +0000 UTC m=+962.341014415" Nov 26 22:37:14 crc kubenswrapper[4903]: I1126 22:37:14.138725 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:14 crc kubenswrapper[4903]: I1126 22:37:14.157490 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"speaker-f2g89\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " pod="metallb-system/speaker-f2g89" Nov 26 22:37:14 crc kubenswrapper[4903]: I1126 22:37:14.196315 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-f2g89" Nov 26 22:37:14 crc kubenswrapper[4903]: W1126 22:37:14.230406 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61e82f3d_2aca_46e7_bd0f_12c8b492c14e.slice/crio-303afaa44f917d3d9f0fdd0fdbf5b5628f4966634b9bab8c5e2c0fe0bdcb9a7f WatchSource:0}: Error finding container 303afaa44f917d3d9f0fdd0fdbf5b5628f4966634b9bab8c5e2c0fe0bdcb9a7f: Status 404 returned error can't find the container with id 303afaa44f917d3d9f0fdd0fdbf5b5628f4966634b9bab8c5e2c0fe0bdcb9a7f Nov 26 22:37:14 crc kubenswrapper[4903]: I1126 22:37:14.637988 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f2g89" event={"ID":"61e82f3d-2aca-46e7-bd0f-12c8b492c14e","Type":"ContainerStarted","Data":"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155"} Nov 26 22:37:14 crc kubenswrapper[4903]: I1126 22:37:14.638321 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f2g89" event={"ID":"61e82f3d-2aca-46e7-bd0f-12c8b492c14e","Type":"ContainerStarted","Data":"303afaa44f917d3d9f0fdd0fdbf5b5628f4966634b9bab8c5e2c0fe0bdcb9a7f"} Nov 26 22:37:15 crc kubenswrapper[4903]: I1126 22:37:15.645955 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f2g89" event={"ID":"61e82f3d-2aca-46e7-bd0f-12c8b492c14e","Type":"ContainerStarted","Data":"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568"} Nov 26 22:37:15 crc kubenswrapper[4903]: I1126 22:37:15.646134 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-f2g89" Nov 26 22:37:15 crc kubenswrapper[4903]: I1126 22:37:15.662139 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-f2g89" podStartSLOduration=3.662121614 podStartE2EDuration="3.662121614s" podCreationTimestamp="2025-11-26 22:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:37:15.659475774 +0000 UTC m=+964.349710684" watchObservedRunningTime="2025-11-26 22:37:15.662121614 +0000 UTC m=+964.352356524" Nov 26 22:37:21 crc kubenswrapper[4903]: I1126 22:37:21.697842 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" exitCode=0 Nov 26 22:37:21 crc kubenswrapper[4903]: I1126 22:37:21.697931 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerDied","Data":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} Nov 26 22:37:21 crc kubenswrapper[4903]: I1126 22:37:21.701125 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" event={"ID":"5317f83c-9fcf-4df1-9823-bb92767545a9","Type":"ContainerStarted","Data":"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5"} Nov 26 22:37:21 crc kubenswrapper[4903]: I1126 22:37:21.701370 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:21 crc kubenswrapper[4903]: I1126 22:37:21.746889 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" podStartSLOduration=1.860904284 
podStartE2EDuration="9.746865538s" podCreationTimestamp="2025-11-26 22:37:12 +0000 UTC" firstStartedPulling="2025-11-26 22:37:13.085177379 +0000 UTC m=+961.775412309" lastFinishedPulling="2025-11-26 22:37:20.971138603 +0000 UTC m=+969.661373563" observedRunningTime="2025-11-26 22:37:21.745892042 +0000 UTC m=+970.436126982" watchObservedRunningTime="2025-11-26 22:37:21.746865538 +0000 UTC m=+970.437100478" Nov 26 22:37:22 crc kubenswrapper[4903]: I1126 22:37:22.714633 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" exitCode=0 Nov 26 22:37:22 crc kubenswrapper[4903]: I1126 22:37:22.714731 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerDied","Data":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} Nov 26 22:37:23 crc kubenswrapper[4903]: I1126 22:37:23.729041 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" exitCode=0 Nov 26 22:37:23 crc kubenswrapper[4903]: I1126 22:37:23.729109 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerDied","Data":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} Nov 26 22:37:24 crc kubenswrapper[4903]: I1126 22:37:24.201068 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-f2g89" Nov 26 22:37:24 crc kubenswrapper[4903]: I1126 22:37:24.744877 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} Nov 26 22:37:24 crc kubenswrapper[4903]: I1126 22:37:24.745320 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} Nov 26 22:37:24 crc kubenswrapper[4903]: I1126 22:37:24.745339 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} Nov 26 22:37:24 crc kubenswrapper[4903]: I1126 22:37:24.745352 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} Nov 26 22:37:25 crc kubenswrapper[4903]: I1126 22:37:25.765382 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} Nov 26 22:37:25 crc kubenswrapper[4903]: I1126 22:37:25.765797 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:25 crc kubenswrapper[4903]: I1126 22:37:25.765814 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qxrcx" 
event={"ID":"2406eb0a-073a-4339-98f2-baa11ceacaa4","Type":"ContainerStarted","Data":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} Nov 26 22:37:25 crc kubenswrapper[4903]: I1126 22:37:25.804402 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qxrcx" podStartSLOduration=5.632273325 podStartE2EDuration="13.804369226s" podCreationTimestamp="2025-11-26 22:37:12 +0000 UTC" firstStartedPulling="2025-11-26 22:37:12.76919603 +0000 UTC m=+961.459430940" lastFinishedPulling="2025-11-26 22:37:20.941291891 +0000 UTC m=+969.631526841" observedRunningTime="2025-11-26 22:37:25.79348861 +0000 UTC m=+974.483723520" watchObservedRunningTime="2025-11-26 22:37:25.804369226 +0000 UTC m=+974.494604156" Nov 26 22:37:26 crc kubenswrapper[4903]: I1126 22:37:26.990740 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-zljmf"] Nov 26 22:37:26 crc kubenswrapper[4903]: I1126 22:37:26.993023 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:26 crc kubenswrapper[4903]: I1126 22:37:26.995668 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-lhfsx" Nov 26 22:37:26 crc kubenswrapper[4903]: I1126 22:37:26.996641 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 22:37:26 crc kubenswrapper[4903]: I1126 22:37:26.996725 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.004642 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zljmf"] Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.098900 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjd4r\" (UniqueName: \"kubernetes.io/projected/a348b7af-eb1c-4c45-8611-9a37a4ee9ac7-kube-api-access-gjd4r\") pod \"openstack-operator-index-zljmf\" (UID: \"a348b7af-eb1c-4c45-8611-9a37a4ee9ac7\") " pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.201328 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjd4r\" (UniqueName: \"kubernetes.io/projected/a348b7af-eb1c-4c45-8611-9a37a4ee9ac7-kube-api-access-gjd4r\") pod \"openstack-operator-index-zljmf\" (UID: \"a348b7af-eb1c-4c45-8611-9a37a4ee9ac7\") " pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.225919 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjd4r\" (UniqueName: \"kubernetes.io/projected/a348b7af-eb1c-4c45-8611-9a37a4ee9ac7-kube-api-access-gjd4r\") pod \"openstack-operator-index-zljmf\" (UID: \"a348b7af-eb1c-4c45-8611-9a37a4ee9ac7\") " pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.317329 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.615446 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.649187 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:27 crc kubenswrapper[4903]: I1126 22:37:27.806860 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zljmf"] Nov 26 22:37:28 crc kubenswrapper[4903]: I1126 22:37:28.805345 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zljmf" event={"ID":"a348b7af-eb1c-4c45-8611-9a37a4ee9ac7","Type":"ContainerStarted","Data":"69882ed50b4b5bbcad91b3f6b166b7087b67ebadc5d61c020410137aa320e54e"} Nov 26 22:37:30 crc kubenswrapper[4903]: I1126 22:37:30.826734 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zljmf" event={"ID":"a348b7af-eb1c-4c45-8611-9a37a4ee9ac7","Type":"ContainerStarted","Data":"f821f4966ffa5b322b2838474a75806ed62398eb78f94b539437d4a116500ec7"} Nov 26 22:37:30 crc kubenswrapper[4903]: I1126 22:37:30.863472 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-zljmf" podStartSLOduration=2.42379619 podStartE2EDuration="4.863452073s" podCreationTimestamp="2025-11-26 22:37:26 +0000 UTC" firstStartedPulling="2025-11-26 22:37:27.824147729 +0000 UTC m=+976.514382639" lastFinishedPulling="2025-11-26 22:37:30.263803582 +0000 UTC m=+978.954038522" observedRunningTime="2025-11-26 22:37:30.862846817 +0000 UTC m=+979.553081777" watchObservedRunningTime="2025-11-26 22:37:30.863452073 +0000 UTC m=+979.553686993" Nov 26 22:37:32 crc kubenswrapper[4903]: I1126 22:37:32.636503 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 26 22:37:32 crc kubenswrapper[4903]: I1126 22:37:32.709929 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 26 22:37:37 crc kubenswrapper[4903]: I1126 22:37:37.318191 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:37 crc kubenswrapper[4903]: I1126 22:37:37.318947 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:37 crc kubenswrapper[4903]: I1126 22:37:37.367263 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:37 crc kubenswrapper[4903]: I1126 22:37:37.943682 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-zljmf" Nov 26 22:37:42 crc kubenswrapper[4903]: I1126 22:37:42.623075 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qxrcx" Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.825269 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw"] Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.828538 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.832989 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wnztn" Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.849132 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw"] Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.968287 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.968385 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxtmr\" (UniqueName: \"kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:55 crc kubenswrapper[4903]: I1126 22:37:55.968600 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.070987 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.071109 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxtmr\" (UniqueName: \"kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.071203 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.071881 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.072240 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.106260 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxtmr\" (UniqueName: \"kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr\") pod \"20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.159212 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:37:56 crc kubenswrapper[4903]: I1126 22:37:56.684742 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw"] Nov 26 22:37:57 crc kubenswrapper[4903]: I1126 22:37:57.108532 4903 generic.go:334] "Generic (PLEG): container finished" podID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerID="f11decd969ac2f8e16cb491a14b35a4f7cee16838bd235581e1105acc02743ac" exitCode=0 Nov 26 22:37:57 crc kubenswrapper[4903]: I1126 22:37:57.108635 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" event={"ID":"34b7b696-e29f-43e0-8186-9ca0219ab924","Type":"ContainerDied","Data":"f11decd969ac2f8e16cb491a14b35a4f7cee16838bd235581e1105acc02743ac"} Nov 26 22:37:57 crc kubenswrapper[4903]: I1126 22:37:57.109006 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" event={"ID":"34b7b696-e29f-43e0-8186-9ca0219ab924","Type":"ContainerStarted","Data":"72cd46cf366cde7a11022d9473376859b5d222c9df90c831a7ac9dbded937bf8"} Nov 26 22:37:58 crc kubenswrapper[4903]: I1126 22:37:58.120621 4903 generic.go:334] "Generic (PLEG): container finished" podID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerID="af9e242ad93a118e459f5a552ded1e7a616ea10ef792ba6c3f142d2a4a42ebee" exitCode=0 Nov 26 22:37:58 crc kubenswrapper[4903]: I1126 22:37:58.120686 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" event={"ID":"34b7b696-e29f-43e0-8186-9ca0219ab924","Type":"ContainerDied","Data":"af9e242ad93a118e459f5a552ded1e7a616ea10ef792ba6c3f142d2a4a42ebee"} Nov 26 22:37:59 crc kubenswrapper[4903]: I1126 22:37:59.132797 4903 generic.go:334] "Generic (PLEG): container finished" podID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerID="981f8334e630d3b5c76e01e8c939f5cc44df904f567d58406d5730b9f1db1862" exitCode=0 Nov 26 22:37:59 crc kubenswrapper[4903]: I1126 22:37:59.132871 4903 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" event={"ID":"34b7b696-e29f-43e0-8186-9ca0219ab924","Type":"ContainerDied","Data":"981f8334e630d3b5c76e01e8c939f5cc44df904f567d58406d5730b9f1db1862"} Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.577340 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.663923 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle\") pod \"34b7b696-e29f-43e0-8186-9ca0219ab924\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.664047 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util\") pod \"34b7b696-e29f-43e0-8186-9ca0219ab924\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.664192 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxtmr\" (UniqueName: \"kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr\") pod \"34b7b696-e29f-43e0-8186-9ca0219ab924\" (UID: \"34b7b696-e29f-43e0-8186-9ca0219ab924\") " Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.665973 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle" (OuterVolumeSpecName: "bundle") pod "34b7b696-e29f-43e0-8186-9ca0219ab924" (UID: "34b7b696-e29f-43e0-8186-9ca0219ab924"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.672835 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr" (OuterVolumeSpecName: "kube-api-access-xxtmr") pod "34b7b696-e29f-43e0-8186-9ca0219ab924" (UID: "34b7b696-e29f-43e0-8186-9ca0219ab924"). InnerVolumeSpecName "kube-api-access-xxtmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.687097 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util" (OuterVolumeSpecName: "util") pod "34b7b696-e29f-43e0-8186-9ca0219ab924" (UID: "34b7b696-e29f-43e0-8186-9ca0219ab924"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.766507 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.766555 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/34b7b696-e29f-43e0-8186-9ca0219ab924-util\") on node \"crc\" DevicePath \"\"" Nov 26 22:38:00 crc kubenswrapper[4903]: I1126 22:38:00.766588 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxtmr\" (UniqueName: \"kubernetes.io/projected/34b7b696-e29f-43e0-8186-9ca0219ab924-kube-api-access-xxtmr\") on node \"crc\" DevicePath \"\"" Nov 26 22:38:01 crc kubenswrapper[4903]: I1126 22:38:01.158593 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" event={"ID":"34b7b696-e29f-43e0-8186-9ca0219ab924","Type":"ContainerDied","Data":"72cd46cf366cde7a11022d9473376859b5d222c9df90c831a7ac9dbded937bf8"} Nov 26 22:38:01 crc kubenswrapper[4903]: I1126 22:38:01.158654 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72cd46cf366cde7a11022d9473376859b5d222c9df90c831a7ac9dbded937bf8" Nov 26 22:38:01 crc kubenswrapper[4903]: I1126 22:38:01.158688 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw" Nov 26 22:38:01 crc kubenswrapper[4903]: I1126 22:38:01.981744 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:38:01 crc kubenswrapper[4903]: I1126 22:38:01.982792 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.734109 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq"] Nov 26 22:38:08 crc kubenswrapper[4903]: E1126 22:38:08.735128 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="util" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.735148 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="util" Nov 26 22:38:08 crc kubenswrapper[4903]: E1126 22:38:08.735189 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="extract" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.735201 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="extract" Nov 26 22:38:08 crc kubenswrapper[4903]: E1126 22:38:08.735257 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="pull" Nov 26 
22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.735270 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="pull" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.735533 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="34b7b696-e29f-43e0-8186-9ca0219ab924" containerName="extract" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.737121 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.739152 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wn2mx" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.819601 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq"] Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.820713 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt5kj\" (UniqueName: \"kubernetes.io/projected/651c7100-bdd0-41e2-8a7f-eaab13dfd391-kube-api-access-gt5kj\") pod \"openstack-operator-controller-operator-5bd96487c4-8k4kq\" (UID: \"651c7100-bdd0-41e2-8a7f-eaab13dfd391\") " pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.924850 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt5kj\" (UniqueName: \"kubernetes.io/projected/651c7100-bdd0-41e2-8a7f-eaab13dfd391-kube-api-access-gt5kj\") pod \"openstack-operator-controller-operator-5bd96487c4-8k4kq\" (UID: \"651c7100-bdd0-41e2-8a7f-eaab13dfd391\") " pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:08 crc kubenswrapper[4903]: I1126 22:38:08.957703 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt5kj\" (UniqueName: \"kubernetes.io/projected/651c7100-bdd0-41e2-8a7f-eaab13dfd391-kube-api-access-gt5kj\") pod \"openstack-operator-controller-operator-5bd96487c4-8k4kq\" (UID: \"651c7100-bdd0-41e2-8a7f-eaab13dfd391\") " pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:09 crc kubenswrapper[4903]: I1126 22:38:09.056978 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:09 crc kubenswrapper[4903]: I1126 22:38:09.557547 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq"] Nov 26 22:38:09 crc kubenswrapper[4903]: W1126 22:38:09.561157 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod651c7100_bdd0_41e2_8a7f_eaab13dfd391.slice/crio-14034373e3271f3ba0f9b9db66380c8d07245d4428d83bd1c94cffb827dfca77 WatchSource:0}: Error finding container 14034373e3271f3ba0f9b9db66380c8d07245d4428d83bd1c94cffb827dfca77: Status 404 returned error can't find the container with id 14034373e3271f3ba0f9b9db66380c8d07245d4428d83bd1c94cffb827dfca77 Nov 26 22:38:10 crc kubenswrapper[4903]: I1126 22:38:10.261363 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" event={"ID":"651c7100-bdd0-41e2-8a7f-eaab13dfd391","Type":"ContainerStarted","Data":"14034373e3271f3ba0f9b9db66380c8d07245d4428d83bd1c94cffb827dfca77"} Nov 26 22:38:14 crc kubenswrapper[4903]: I1126 22:38:14.304288 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" event={"ID":"651c7100-bdd0-41e2-8a7f-eaab13dfd391","Type":"ContainerStarted","Data":"804a40a770771087be097ba58b157e480b884d71a51ea79beece38c9cae8b2a9"} Nov 26 22:38:14 crc kubenswrapper[4903]: I1126 22:38:14.304800 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:14 crc kubenswrapper[4903]: I1126 22:38:14.347069 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" podStartSLOduration=2.385961229 podStartE2EDuration="6.347047785s" podCreationTimestamp="2025-11-26 22:38:08 +0000 UTC" firstStartedPulling="2025-11-26 22:38:09.563204897 +0000 UTC m=+1018.253439817" lastFinishedPulling="2025-11-26 22:38:13.524291463 +0000 UTC m=+1022.214526373" observedRunningTime="2025-11-26 22:38:14.342013433 +0000 UTC m=+1023.032248373" watchObservedRunningTime="2025-11-26 22:38:14.347047785 +0000 UTC m=+1023.037282735" Nov 26 22:38:19 crc kubenswrapper[4903]: I1126 22:38:19.060769 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:38:31 crc kubenswrapper[4903]: I1126 22:38:31.981416 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:38:31 crc kubenswrapper[4903]: I1126 22:38:31.981913 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.339634 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.341418 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.344445 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-nntjc" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.359452 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.364591 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.369209 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.371297 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-bh2qf" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.374460 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-rtztw"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.376452 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.378552 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hdb9h" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.384527 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.392334 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.395685 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.399949 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-jzrr8" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.414242 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-rtztw"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.456827 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6j99\" (UniqueName: \"kubernetes.io/projected/710215b7-5e67-47d8-833f-b8db638cac56-kube-api-access-w6j99\") pod \"glance-operator-controller-manager-589cbd6b5b-shqxg\" (UID: \"710215b7-5e67-47d8-833f-b8db638cac56\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.456907 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m24cl\" (UniqueName: \"kubernetes.io/projected/d9a3465f-cd49-4af9-a908-58aec0273dbe-kube-api-access-m24cl\") pod \"cinder-operator-controller-manager-6b7f75547b-n7krq\" (UID: \"d9a3465f-cd49-4af9-a908-58aec0273dbe\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.456947 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46c56\" (UniqueName: \"kubernetes.io/projected/3e621847-5f60-491a-8e5c-f2fb10df1726-kube-api-access-46c56\") pod \"barbican-operator-controller-manager-7b64f4fb85-6hzbx\" (UID: \"3e621847-5f60-491a-8e5c-f2fb10df1726\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.457022 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bxtm\" (UniqueName: \"kubernetes.io/projected/63feada5-3911-469e-a0b1-539b7aa2948d-kube-api-access-9bxtm\") pod \"designate-operator-controller-manager-955677c94-rtztw\" (UID: \"63feada5-3911-469e-a0b1-539b7aa2948d\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.469888 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.471127 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.472832 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-x9sx2" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.477734 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.486802 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.488094 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.490333 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jzktn" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.501743 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.503186 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.507479 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.507563 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-phnjg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.520611 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.530002 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.531241 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.533141 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-jqrgv" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.542956 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.544951 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.553671 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.571922 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572016 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzq25\" (UniqueName: \"kubernetes.io/projected/34b48ba8-04a0-463d-9e31-b7c13127ce9c-kube-api-access-mzq25\") pod \"horizon-operator-controller-manager-5d494799bf-v4b66\" (UID: \"34b48ba8-04a0-463d-9e31-b7c13127ce9c\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572078 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-w6j99\" (UniqueName: \"kubernetes.io/projected/710215b7-5e67-47d8-833f-b8db638cac56-kube-api-access-w6j99\") pod \"glance-operator-controller-manager-589cbd6b5b-shqxg\" (UID: \"710215b7-5e67-47d8-833f-b8db638cac56\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572194 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m24cl\" (UniqueName: \"kubernetes.io/projected/d9a3465f-cd49-4af9-a908-58aec0273dbe-kube-api-access-m24cl\") pod \"cinder-operator-controller-manager-6b7f75547b-n7krq\" (UID: \"d9a3465f-cd49-4af9-a908-58aec0273dbe\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572244 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46c56\" (UniqueName: \"kubernetes.io/projected/3e621847-5f60-491a-8e5c-f2fb10df1726-kube-api-access-46c56\") pod \"barbican-operator-controller-manager-7b64f4fb85-6hzbx\" (UID: \"3e621847-5f60-491a-8e5c-f2fb10df1726\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572344 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbbmp\" (UniqueName: \"kubernetes.io/projected/b34e8bed-559a-49d6-b870-c375f36be49f-kube-api-access-mbbmp\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9xdn\" (UniqueName: \"kubernetes.io/projected/e0c12217-0537-436e-b0d9-5e5049888268-kube-api-access-w9xdn\") pod \"heat-operator-controller-manager-5b77f656f-x59hr\" (UID: \"e0c12217-0537-436e-b0d9-5e5049888268\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.572407 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bxtm\" (UniqueName: \"kubernetes.io/projected/63feada5-3911-469e-a0b1-539b7aa2948d-kube-api-access-9bxtm\") pod \"designate-operator-controller-manager-955677c94-rtztw\" (UID: \"63feada5-3911-469e-a0b1-539b7aa2948d\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.606113 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m24cl\" (UniqueName: \"kubernetes.io/projected/d9a3465f-cd49-4af9-a908-58aec0273dbe-kube-api-access-m24cl\") pod \"cinder-operator-controller-manager-6b7f75547b-n7krq\" (UID: \"d9a3465f-cd49-4af9-a908-58aec0273dbe\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.631089 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bxtm\" (UniqueName: \"kubernetes.io/projected/63feada5-3911-469e-a0b1-539b7aa2948d-kube-api-access-9bxtm\") pod \"designate-operator-controller-manager-955677c94-rtztw\" (UID: \"63feada5-3911-469e-a0b1-539b7aa2948d\") " 
pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.631510 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46c56\" (UniqueName: \"kubernetes.io/projected/3e621847-5f60-491a-8e5c-f2fb10df1726-kube-api-access-46c56\") pod \"barbican-operator-controller-manager-7b64f4fb85-6hzbx\" (UID: \"3e621847-5f60-491a-8e5c-f2fb10df1726\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.631948 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6j99\" (UniqueName: \"kubernetes.io/projected/710215b7-5e67-47d8-833f-b8db638cac56-kube-api-access-w6j99\") pod \"glance-operator-controller-manager-589cbd6b5b-shqxg\" (UID: \"710215b7-5e67-47d8-833f-b8db638cac56\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.663741 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.664241 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.665876 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.667841 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jnmgr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.675898 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzbd4\" (UniqueName: \"kubernetes.io/projected/ced64189-a8c9-4e13-956b-f69139a9602b-kube-api-access-jzbd4\") pod \"ironic-operator-controller-manager-67cb4dc6d4-bm7r7\" (UID: \"ced64189-a8c9-4e13-956b-f69139a9602b\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.676470 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbbmp\" (UniqueName: \"kubernetes.io/projected/b34e8bed-559a-49d6-b870-c375f36be49f-kube-api-access-mbbmp\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.676509 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9xdn\" (UniqueName: \"kubernetes.io/projected/e0c12217-0537-436e-b0d9-5e5049888268-kube-api-access-w9xdn\") pod \"heat-operator-controller-manager-5b77f656f-x59hr\" (UID: \"e0c12217-0537-436e-b0d9-5e5049888268\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.676597 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.676629 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzq25\" (UniqueName: \"kubernetes.io/projected/34b48ba8-04a0-463d-9e31-b7c13127ce9c-kube-api-access-mzq25\") pod \"horizon-operator-controller-manager-5d494799bf-v4b66\" (UID: \"34b48ba8-04a0-463d-9e31-b7c13127ce9c\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:38:48 crc kubenswrapper[4903]: E1126 22:38:48.677907 4903 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:48 crc kubenswrapper[4903]: E1126 22:38:48.677956 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert podName:b34e8bed-559a-49d6-b870-c375f36be49f nodeName:}" failed. No retries permitted until 2025-11-26 22:38:49.177940003 +0000 UTC m=+1057.868174913 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert") pod "infra-operator-controller-manager-57548d458d-tdlsw" (UID: "b34e8bed-559a-49d6-b870-c375f36be49f") : secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.695407 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9xdn\" (UniqueName: \"kubernetes.io/projected/e0c12217-0537-436e-b0d9-5e5049888268-kube-api-access-w9xdn\") pod \"heat-operator-controller-manager-5b77f656f-x59hr\" (UID: \"e0c12217-0537-436e-b0d9-5e5049888268\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.695641 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzq25\" (UniqueName: \"kubernetes.io/projected/34b48ba8-04a0-463d-9e31-b7c13127ce9c-kube-api-access-mzq25\") pod \"horizon-operator-controller-manager-5d494799bf-v4b66\" (UID: \"34b48ba8-04a0-463d-9e31-b7c13127ce9c\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.698728 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.699316 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbbmp\" (UniqueName: \"kubernetes.io/projected/b34e8bed-559a-49d6-b870-c375f36be49f-kube-api-access-mbbmp\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.700311 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.708235 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.709579 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.711448 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-zbvr7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.711764 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.722555 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.730337 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.730655 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.730806 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.731994 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-g77bs" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.775429 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.778134 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb8dv\" (UniqueName: \"kubernetes.io/projected/edfb7faf-e9af-4ee8-85cd-a11af5812946-kube-api-access-pb8dv\") pod \"manila-operator-controller-manager-5d499bf58b-vj562\" (UID: \"edfb7faf-e9af-4ee8-85cd-a11af5812946\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.778231 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9g2h\" (UniqueName: \"kubernetes.io/projected/32ccd880-8dfa-46d1-b262-5d10422527ec-kube-api-access-q9g2h\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-pzwmk\" (UID: \"32ccd880-8dfa-46d1-b262-5d10422527ec\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.778255 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtsk2\" (UniqueName: \"kubernetes.io/projected/e3d89c00-9723-43a3-a1d2-866787257900-kube-api-access-dtsk2\") pod \"keystone-operator-controller-manager-7b4567c7cf-kxg8s\" (UID: \"e3d89c00-9723-43a3-a1d2-866787257900\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.778287 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzbd4\" (UniqueName: \"kubernetes.io/projected/ced64189-a8c9-4e13-956b-f69139a9602b-kube-api-access-jzbd4\") pod \"ironic-operator-controller-manager-67cb4dc6d4-bm7r7\" (UID: 
\"ced64189-a8c9-4e13-956b-f69139a9602b\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.797445 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.798807 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.802380 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-cxzdg" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.807136 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.809069 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.811239 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.814838 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzbd4\" (UniqueName: \"kubernetes.io/projected/ced64189-a8c9-4e13-956b-f69139a9602b-kube-api-access-jzbd4\") pod \"ironic-operator-controller-manager-67cb4dc6d4-bm7r7\" (UID: \"ced64189-a8c9-4e13-956b-f69139a9602b\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.819307 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6jfkt" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.826367 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.845051 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.856067 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.857586 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.860783 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-9lhcw" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.876853 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.885366 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88lpk\" (UniqueName: \"kubernetes.io/projected/9c3a16ab-252a-4a01-aaab-b273d3d55c0a-kube-api-access-88lpk\") pod \"neutron-operator-controller-manager-6fdcddb789-t5gqj\" (UID: \"9c3a16ab-252a-4a01-aaab-b273d3d55c0a\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.885417 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb8dv\" (UniqueName: \"kubernetes.io/projected/edfb7faf-e9af-4ee8-85cd-a11af5812946-kube-api-access-pb8dv\") pod \"manila-operator-controller-manager-5d499bf58b-vj562\" (UID: \"edfb7faf-e9af-4ee8-85cd-a11af5812946\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.885523 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9g2h\" (UniqueName: \"kubernetes.io/projected/32ccd880-8dfa-46d1-b262-5d10422527ec-kube-api-access-q9g2h\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-pzwmk\" (UID: \"32ccd880-8dfa-46d1-b262-5d10422527ec\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.885543 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtsk2\" (UniqueName: \"kubernetes.io/projected/e3d89c00-9723-43a3-a1d2-866787257900-kube-api-access-dtsk2\") pod \"keystone-operator-controller-manager-7b4567c7cf-kxg8s\" (UID: \"e3d89c00-9723-43a3-a1d2-866787257900\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.885564 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm5nz\" (UniqueName: \"kubernetes.io/projected/fcacd7dc-2b08-46d7-98c2-09cf6b6d690b-kube-api-access-bm5nz\") pod \"nova-operator-controller-manager-79556f57fc-5kmlf\" (UID: \"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.891583 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.896154 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.899116 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.900535 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.903291 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.903535 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5w4jc" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.905806 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.907055 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.911202 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-cbr9g" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.911391 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.911430 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb8dv\" (UniqueName: \"kubernetes.io/projected/edfb7faf-e9af-4ee8-85cd-a11af5812946-kube-api-access-pb8dv\") pod \"manila-operator-controller-manager-5d499bf58b-vj562\" (UID: \"edfb7faf-e9af-4ee8-85cd-a11af5812946\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.912677 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.914384 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-bg9qc" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.915165 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtsk2\" (UniqueName: \"kubernetes.io/projected/e3d89c00-9723-43a3-a1d2-866787257900-kube-api-access-dtsk2\") pod \"keystone-operator-controller-manager-7b4567c7cf-kxg8s\" (UID: \"e3d89c00-9723-43a3-a1d2-866787257900\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.919005 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.930139 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9g2h\" (UniqueName: \"kubernetes.io/projected/32ccd880-8dfa-46d1-b262-5d10422527ec-kube-api-access-q9g2h\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-pzwmk\" (UID: \"32ccd880-8dfa-46d1-b262-5d10422527ec\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.937254 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.954345 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.973708 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb"] Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.976622 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.982083 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-6cdkz" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.987433 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm5nz\" (UniqueName: \"kubernetes.io/projected/fcacd7dc-2b08-46d7-98c2-09cf6b6d690b-kube-api-access-bm5nz\") pod \"nova-operator-controller-manager-79556f57fc-5kmlf\" (UID: \"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.987769 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kjtz\" (UniqueName: \"kubernetes.io/projected/83927c87-ccd7-4b29-97b1-8d03ce0d1b1e-kube-api-access-2kjtz\") pod \"placement-operator-controller-manager-57988cc5b5-vjt6h\" (UID: \"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.987876 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ln5t\" (UniqueName: \"kubernetes.io/projected/6b930423-80e6-4e2c-825f-7deceec090f5-kube-api-access-5ln5t\") pod \"octavia-operator-controller-manager-64cdc6ff96-nz8x4\" (UID: \"6b930423-80e6-4e2c-825f-7deceec090f5\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.987898 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wsrk\" (UniqueName: \"kubernetes.io/projected/0c7b8e09-c502-425e-ac59-b2befd1132fa-kube-api-access-9wsrk\") pod \"ovn-operator-controller-manager-56897c768d-jn49q\" (UID: \"0c7b8e09-c502-425e-ac59-b2befd1132fa\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.987937 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88lpk\" (UniqueName: \"kubernetes.io/projected/9c3a16ab-252a-4a01-aaab-b273d3d55c0a-kube-api-access-88lpk\") pod \"neutron-operator-controller-manager-6fdcddb789-t5gqj\" (UID: \"9c3a16ab-252a-4a01-aaab-b273d3d55c0a\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.988025 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6bvh\" (UniqueName: \"kubernetes.io/projected/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-kube-api-access-r6bvh\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:48 crc kubenswrapper[4903]: I1126 22:38:48.988197 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.002946 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.007004 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm5nz\" (UniqueName: \"kubernetes.io/projected/fcacd7dc-2b08-46d7-98c2-09cf6b6d690b-kube-api-access-bm5nz\") pod \"nova-operator-controller-manager-79556f57fc-5kmlf\" (UID: \"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.018286 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88lpk\" (UniqueName: \"kubernetes.io/projected/9c3a16ab-252a-4a01-aaab-b273d3d55c0a-kube-api-access-88lpk\") pod \"neutron-operator-controller-manager-6fdcddb789-t5gqj\" (UID: \"9c3a16ab-252a-4a01-aaab-b273d3d55c0a\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.022366 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.036615 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.039161 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.041336 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-sctsj" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.049569 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.071759 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.075002 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.085355 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.086379 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.089062 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090293 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcrjp\" (UniqueName: \"kubernetes.io/projected/3f2ebc07-fbfc-4bd6-9622-63b820e47247-kube-api-access-mcrjp\") pod \"telemetry-operator-controller-manager-6986c4df8b-bkqnw\" (UID: \"3f2ebc07-fbfc-4bd6-9622-63b820e47247\") " pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090381 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ln5t\" (UniqueName: \"kubernetes.io/projected/6b930423-80e6-4e2c-825f-7deceec090f5-kube-api-access-5ln5t\") pod \"octavia-operator-controller-manager-64cdc6ff96-nz8x4\" (UID: \"6b930423-80e6-4e2c-825f-7deceec090f5\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090402 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wsrk\" (UniqueName: \"kubernetes.io/projected/0c7b8e09-c502-425e-ac59-b2befd1132fa-kube-api-access-9wsrk\") pod \"ovn-operator-controller-manager-56897c768d-jn49q\" (UID: \"0c7b8e09-c502-425e-ac59-b2befd1132fa\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090494 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6bvh\" (UniqueName: \"kubernetes.io/projected/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-kube-api-access-r6bvh\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090523 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtm22\" (UniqueName: \"kubernetes.io/projected/736b757c-8584-4b59-81d6-ffdd8bbac62c-kube-api-access-xtm22\") pod \"swift-operator-controller-manager-d77b94747-2h7mb\" (UID: \"736b757c-8584-4b59-81d6-ffdd8bbac62c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090540 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.090608 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kjtz\" (UniqueName: \"kubernetes.io/projected/83927c87-ccd7-4b29-97b1-8d03ce0d1b1e-kube-api-access-2kjtz\") pod \"placement-operator-controller-manager-57988cc5b5-vjt6h\" (UID: \"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.091728 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.091767 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-ljln7" Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.092754 4903 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.092797 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert podName:d4e9967e-dcf0-42c1-94fc-fea289ed54c2 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:49.592781069 +0000 UTC m=+1058.283015969 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" (UID: "d4e9967e-dcf0-42c1-94fc-fea289ed54c2") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.106747 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.108875 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6bvh\" (UniqueName: \"kubernetes.io/projected/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-kube-api-access-r6bvh\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.117564 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kjtz\" (UniqueName: \"kubernetes.io/projected/83927c87-ccd7-4b29-97b1-8d03ce0d1b1e-kube-api-access-2kjtz\") pod \"placement-operator-controller-manager-57988cc5b5-vjt6h\" (UID: \"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.122247 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wsrk\" (UniqueName: \"kubernetes.io/projected/0c7b8e09-c502-425e-ac59-b2befd1132fa-kube-api-access-9wsrk\") pod \"ovn-operator-controller-manager-56897c768d-jn49q\" (UID: \"0c7b8e09-c502-425e-ac59-b2befd1132fa\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.125717 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.129242 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ln5t\" (UniqueName: \"kubernetes.io/projected/6b930423-80e6-4e2c-825f-7deceec090f5-kube-api-access-5ln5t\") pod \"octavia-operator-controller-manager-64cdc6ff96-nz8x4\" (UID: \"6b930423-80e6-4e2c-825f-7deceec090f5\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.176761 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.190020 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.196051 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.196131 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j896\" (UniqueName: \"kubernetes.io/projected/1a890e26-66fb-47d6-85dc-ae6b9045e4c6-kube-api-access-6j896\") pod \"test-operator-controller-manager-5cd6c7f4c8-gw5wx\" (UID: \"1a890e26-66fb-47d6-85dc-ae6b9045e4c6\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.196195 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtm22\" (UniqueName: \"kubernetes.io/projected/736b757c-8584-4b59-81d6-ffdd8bbac62c-kube-api-access-xtm22\") pod \"swift-operator-controller-manager-d77b94747-2h7mb\" (UID: \"736b757c-8584-4b59-81d6-ffdd8bbac62c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.196650 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcrjp\" (UniqueName: \"kubernetes.io/projected/3f2ebc07-fbfc-4bd6-9622-63b820e47247-kube-api-access-mcrjp\") pod \"telemetry-operator-controller-manager-6986c4df8b-bkqnw\" (UID: \"3f2ebc07-fbfc-4bd6-9622-63b820e47247\") " pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.198434 4903 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.198488 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert podName:b34e8bed-559a-49d6-b870-c375f36be49f nodeName:}" failed. No retries permitted until 2025-11-26 22:38:50.198468345 +0000 UTC m=+1058.888703255 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert") pod "infra-operator-controller-manager-57548d458d-tdlsw" (UID: "b34e8bed-559a-49d6-b870-c375f36be49f") : secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.200529 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.201939 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-t95pk" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.253212 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcrjp\" (UniqueName: \"kubernetes.io/projected/3f2ebc07-fbfc-4bd6-9622-63b820e47247-kube-api-access-mcrjp\") pod \"telemetry-operator-controller-manager-6986c4df8b-bkqnw\" (UID: \"3f2ebc07-fbfc-4bd6-9622-63b820e47247\") " pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.253289 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.265986 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtm22\" (UniqueName: \"kubernetes.io/projected/736b757c-8584-4b59-81d6-ffdd8bbac62c-kube-api-access-xtm22\") pod \"swift-operator-controller-manager-d77b94747-2h7mb\" (UID: \"736b757c-8584-4b59-81d6-ffdd8bbac62c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.280147 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.303047 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxcl8\" (UniqueName: \"kubernetes.io/projected/f8815d8e-4b34-47b3-98fa-8370205381e0-kube-api-access-sxcl8\") pod \"watcher-operator-controller-manager-656dcb59d4-bwfhp\" (UID: \"f8815d8e-4b34-47b3-98fa-8370205381e0\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.303092 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j896\" (UniqueName: \"kubernetes.io/projected/1a890e26-66fb-47d6-85dc-ae6b9045e4c6-kube-api-access-6j896\") pod \"test-operator-controller-manager-5cd6c7f4c8-gw5wx\" (UID: \"1a890e26-66fb-47d6-85dc-ae6b9045e4c6\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.342790 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j896\" (UniqueName: \"kubernetes.io/projected/1a890e26-66fb-47d6-85dc-ae6b9045e4c6-kube-api-access-6j896\") pod \"test-operator-controller-manager-5cd6c7f4c8-gw5wx\" (UID: \"1a890e26-66fb-47d6-85dc-ae6b9045e4c6\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.357163 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.358803 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.364893 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fx5z4" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.365084 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.365296 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.375951 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.389123 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.390222 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.401475 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.405343 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-vfdfv" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.406817 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.406853 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ttmd\" (UniqueName: \"kubernetes.io/projected/9239ccfa-cbaa-44b2-a70f-94a281d885f6-kube-api-access-5ttmd\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.406932 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fprg5\" (UniqueName: \"kubernetes.io/projected/8248a160-f606-4eaa-9bc1-0e7fcc1ab852-kube-api-access-fprg5\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fzd8p\" (UID: \"8248a160-f606-4eaa-9bc1-0e7fcc1ab852\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.406963 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.407018 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxcl8\" (UniqueName: \"kubernetes.io/projected/f8815d8e-4b34-47b3-98fa-8370205381e0-kube-api-access-sxcl8\") pod \"watcher-operator-controller-manager-656dcb59d4-bwfhp\" (UID: \"f8815d8e-4b34-47b3-98fa-8370205381e0\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.429871 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.464961 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxcl8\" (UniqueName: \"kubernetes.io/projected/f8815d8e-4b34-47b3-98fa-8370205381e0-kube-api-access-sxcl8\") pod \"watcher-operator-controller-manager-656dcb59d4-bwfhp\" (UID: \"f8815d8e-4b34-47b3-98fa-8370205381e0\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.508780 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.508826 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ttmd\" (UniqueName: \"kubernetes.io/projected/9239ccfa-cbaa-44b2-a70f-94a281d885f6-kube-api-access-5ttmd\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.508904 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fprg5\" (UniqueName: \"kubernetes.io/projected/8248a160-f606-4eaa-9bc1-0e7fcc1ab852-kube-api-access-fprg5\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fzd8p\" (UID: \"8248a160-f606-4eaa-9bc1-0e7fcc1ab852\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.508935 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.508970 4903 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.509039 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:50.009022611 +0000 UTC m=+1058.699257521 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "metrics-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.509089 4903 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.509139 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:50.009124224 +0000 UTC m=+1058.699359134 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.534249 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.565508 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ttmd\" (UniqueName: \"kubernetes.io/projected/9239ccfa-cbaa-44b2-a70f-94a281d885f6-kube-api-access-5ttmd\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.585404 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fprg5\" (UniqueName: \"kubernetes.io/projected/8248a160-f606-4eaa-9bc1-0e7fcc1ab852-kube-api-access-fprg5\") pod \"rabbitmq-cluster-operator-manager-668c99d594-fzd8p\" (UID: \"8248a160-f606-4eaa-9bc1-0e7fcc1ab852\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.603161 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.607302 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq"] Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.609464 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.610511 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.610985 4903 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: E1126 22:38:49.611049 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert podName:d4e9967e-dcf0-42c1-94fc-fea289ed54c2 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:50.61103142 +0000 UTC m=+1059.301266330 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" (UID: "d4e9967e-dcf0-42c1-94fc-fea289ed54c2") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.613614 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx"] Nov 26 22:38:49 crc kubenswrapper[4903]: W1126 22:38:49.663062 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e621847_5f60_491a_8e5c_f2fb10df1726.slice/crio-c42311014e375c6f6e4ac9be8f75816078fb55f1dac0f3cdfa27c26fcb90f890 WatchSource:0}: Error finding container c42311014e375c6f6e4ac9be8f75816078fb55f1dac0f3cdfa27c26fcb90f890: Status 404 returned error can't find the container with id c42311014e375c6f6e4ac9be8f75816078fb55f1dac0f3cdfa27c26fcb90f890 Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.675970 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerStarted","Data":"580ecdb633bfee6c4687f33f8471b9398d7b5d1722fad0f14b3c05737b1e2456"} Nov 26 22:38:49 crc kubenswrapper[4903]: I1126 22:38:49.719217 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.018922 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.019070 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.019119 4903 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.019217 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:51.019190537 +0000 UTC m=+1059.709425487 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.019271 4903 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.019352 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:51.019333821 +0000 UTC m=+1059.709568731 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "metrics-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.223986 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.224251 4903 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.225326 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert podName:b34e8bed-559a-49d6-b870-c375f36be49f nodeName:}" failed. No retries permitted until 2025-11-26 22:38:52.2252934 +0000 UTC m=+1060.915528350 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert") pod "infra-operator-controller-manager-57548d458d-tdlsw" (UID: "b34e8bed-559a-49d6-b870-c375f36be49f") : secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.636055 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.636498 4903 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.636574 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert podName:d4e9967e-dcf0-42c1-94fc-fea289ed54c2 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:52.63655186 +0000 UTC m=+1061.326786790 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" (UID: "d4e9967e-dcf0-42c1-94fc-fea289ed54c2") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.640536 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-rtztw"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.657186 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83927c87_ccd7_4b29_97b1_8d03ce0d1b1e.slice/crio-18842fca036d8c9f667623a5b9600787ee27ce8c42cffb649924b6d5c0fde1e1 WatchSource:0}: Error finding container 18842fca036d8c9f667623a5b9600787ee27ce8c42cffb649924b6d5c0fde1e1: Status 404 returned error can't find the container with id 18842fca036d8c9f667623a5b9600787ee27ce8c42cffb649924b6d5c0fde1e1 Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.663041 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.676575 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.693986 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.706514 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34b48ba8_04a0_463d_9e31_b7c13127ce9c.slice/crio-b7d7624f6d4c168b25e1ccc748cff733aef61c0cf1ffe4cec97b31afd7b9d3dc WatchSource:0}: Error finding container b7d7624f6d4c168b25e1ccc748cff733aef61c0cf1ffe4cec97b31afd7b9d3dc: Status 404 returned error can't find the container with id b7d7624f6d4c168b25e1ccc748cff733aef61c0cf1ffe4cec97b31afd7b9d3dc Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.707753 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.708237 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedfb7faf_e9af_4ee8_85cd_a11af5812946.slice/crio-9659f7be97f6ef37d27a0285af40c04215e7f83dfd7315f952732144e7da357f WatchSource:0}: Error finding container 9659f7be97f6ef37d27a0285af40c04215e7f83dfd7315f952732144e7da357f: Status 404 returned error can't find the container with id 9659f7be97f6ef37d27a0285af40c04215e7f83dfd7315f952732144e7da357f Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.708380 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerStarted","Data":"c42311014e375c6f6e4ac9be8f75816078fb55f1dac0f3cdfa27c26fcb90f890"} Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.714719 4903 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod710215b7_5e67_47d8_833f_b8db638cac56.slice/crio-932cef7c9f74f50c46921ff6e5772b4cff18524f91f1dd54822c32d9da5c343f WatchSource:0}: Error finding container 932cef7c9f74f50c46921ff6e5772b4cff18524f91f1dd54822c32d9da5c343f: Status 404 returned error can't find the container with id 932cef7c9f74f50c46921ff6e5772b4cff18524f91f1dd54822c32d9da5c343f Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.715494 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerStarted","Data":"2936f145a76c2ba9a0ec3f8c4e7e3bdfbaf70eeef833bb19b8661ebfc9702043"} Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.717291 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcacd7dc_2b08_46d7_98c2_09cf6b6d690b.slice/crio-c02aecb6d6a10b922dd012c0afa260e77ec1f6235d79d3b4091e6047bd4cfe43 WatchSource:0}: Error finding container c02aecb6d6a10b922dd012c0afa260e77ec1f6235d79d3b4091e6047bd4cfe43: Status 404 returned error can't find the container with id c02aecb6d6a10b922dd012c0afa260e77ec1f6235d79d3b4091e6047bd4cfe43 Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.721005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerStarted","Data":"18842fca036d8c9f667623a5b9600787ee27ce8c42cffb649924b6d5c0fde1e1"} Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.724546 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerStarted","Data":"879b30f2de6bd80ea7aa58e0d8f9fba11b34fbf4dbf41118e3966555d474f818"} Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.728090 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.737781 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63feada5_3911_469e_a0b1_539b7aa2948d.slice/crio-bbd77e4095caddeb73777d739d9738fc0185330983888df7aac24eb30f33f038 WatchSource:0}: Error finding container bbd77e4095caddeb73777d739d9738fc0185330983888df7aac24eb30f33f038: Status 404 returned error can't find the container with id bbd77e4095caddeb73777d739d9738fc0185330983888df7aac24eb30f33f038 Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.737823 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.748774 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0c12217_0537_436e_b0d9_5e5049888268.slice/crio-8148be25fae41b0219446e9f3ad5695968837b84db9f8e560993b048b26f3101 WatchSource:0}: Error finding container 8148be25fae41b0219446e9f3ad5695968837b84db9f8e560993b048b26f3101: Status 404 returned error can't find the container with id 8148be25fae41b0219446e9f3ad5695968837b84db9f8e560993b048b26f3101 Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.755850 4903 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.771735 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.775643 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.938079 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.947700 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.954605 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.958858 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk"] Nov 26 22:38:50 crc kubenswrapper[4903]: I1126 22:38:50.964462 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p"] Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.970900 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c7b8e09_c502_425e_ac59_b2befd1132fa.slice/crio-4d8162c0f0ecc179291bfb0bb9f1e140625ba5a45c9afdaf4ad80074b87c4176 WatchSource:0}: Error finding container 4d8162c0f0ecc179291bfb0bb9f1e140625ba5a45c9afdaf4ad80074b87c4176: Status 404 returned error can't find the container with id 4d8162c0f0ecc179291bfb0bb9f1e140625ba5a45c9afdaf4ad80074b87c4176 Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.989523 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sxcl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:50 crc kubenswrapper[4903]: W1126 22:38:50.994980 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f2ebc07_fbfc_4bd6_9622_63b820e47247.slice/crio-2c175da008b286a9a99e4abc8120ee8751db741a881482269e73ad5f6d03bbe2 WatchSource:0}: Error finding container 2c175da008b286a9a99e4abc8120ee8751db741a881482269e73ad5f6d03bbe2: Status 404 returned error can't find the container with id 2c175da008b286a9a99e4abc8120ee8751db741a881482269e73ad5f6d03bbe2 Nov 26 22:38:50 crc kubenswrapper[4903]: E1126 22:38:50.998267 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.66:5001/openstack-k8s-operators/telemetry-operator:4d087536edb04db3b234f903236ee694881e7744,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mcrjp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6986c4df8b-bkqnw_openstack-operators(3f2ebc07-fbfc-4bd6-9622-63b820e47247): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.000507 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mcrjp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6986c4df8b-bkqnw_openstack-operators(3f2ebc07-fbfc-4bd6-9622-63b820e47247): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: W1126 22:38:51.001052 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32ccd880_8dfa_46d1_b262_5d10422527ec.slice/crio-b43c6b6411b2cb32b0374ca64b7dd732a55bf0b9efc4ad57566272f5beffc5be WatchSource:0}: Error finding container b43c6b6411b2cb32b0374ca64b7dd732a55bf0b9efc4ad57566272f5beffc5be: Status 404 returned error can't find the container with id b43c6b6411b2cb32b0374ca64b7dd732a55bf0b9efc4ad57566272f5beffc5be Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.001743 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" 
pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.005665 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sxcl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.005822 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q9g2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_openstack-operators(32ccd880-8dfa-46d1-b262-5d10422527ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.007074 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.008860 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q9g2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_openstack-operators(32ccd880-8dfa-46d1-b262-5d10422527ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.009064 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"] Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.012003 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fprg5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-fzd8p_openstack-operators(8248a160-f606-4eaa-9bc1-0e7fcc1ab852): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.012636 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5ln5t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.015044 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx"] Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.016220 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.016237 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" podUID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.018357 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5ln5t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: W1126 22:38:51.019383 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a890e26_66fb_47d6_85dc_ae6b9045e4c6.slice/crio-0b6d7b0fc38c64b371062264cd85b9cddb5c15328f147454f3d8400c8164d481 WatchSource:0}: Error finding container 0b6d7b0fc38c64b371062264cd85b9cddb5c15328f147454f3d8400c8164d481: Status 404 returned error can't find the container with id 0b6d7b0fc38c64b371062264cd85b9cddb5c15328f147454f3d8400c8164d481 Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.019429 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.023647 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6j896,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-gw5wx_openstack-operators(1a890e26-66fb-47d6-85dc-ae6b9045e4c6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.030058 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6j896,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-gw5wx_openstack-operators(1a890e26-66fb-47d6-85dc-ae6b9045e4c6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.036560 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" podUID="1a890e26-66fb-47d6-85dc-ae6b9045e4c6" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.045146 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.045288 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.045432 4903 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.045471 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:53.045459037 +0000 UTC m=+1061.735693947 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "metrics-server-cert" not found Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.045506 4903 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.045526 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:53.045520719 +0000 UTC m=+1061.735755629 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "webhook-server-cert" not found Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.173236 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb"] Nov 26 22:38:51 crc kubenswrapper[4903]: W1126 22:38:51.175250 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod736b757c_8584_4b59_81d6_ffdd8bbac62c.slice/crio-ca151cd5c4b33ae42bfc4c0e5c2c1e710fe029a7e82e130a6242dcebf6fc55ef WatchSource:0}: Error finding container ca151cd5c4b33ae42bfc4c0e5c2c1e710fe029a7e82e130a6242dcebf6fc55ef: Status 404 returned error can't find the container with id ca151cd5c4b33ae42bfc4c0e5c2c1e710fe029a7e82e130a6242dcebf6fc55ef Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.763338 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerStarted","Data":"4d8162c0f0ecc179291bfb0bb9f1e140625ba5a45c9afdaf4ad80074b87c4176"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.765501 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerStarted","Data":"18e9bccea1849d3229b3060e07e8ad3f60d3099835aab20baea517d764ebcaa7"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.768737 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerStarted","Data":"b43c6b6411b2cb32b0374ca64b7dd732a55bf0b9efc4ad57566272f5beffc5be"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.772586 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerStarted","Data":"bbd77e4095caddeb73777d739d9738fc0185330983888df7aac24eb30f33f038"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.773145 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.775700 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerStarted","Data":"9659f7be97f6ef37d27a0285af40c04215e7f83dfd7315f952732144e7da357f"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.783472 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" event={"ID":"1a890e26-66fb-47d6-85dc-ae6b9045e4c6","Type":"ContainerStarted","Data":"0b6d7b0fc38c64b371062264cd85b9cddb5c15328f147454f3d8400c8164d481"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.786843 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerStarted","Data":"b7d7624f6d4c168b25e1ccc748cff733aef61c0cf1ffe4cec97b31afd7b9d3dc"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.786876 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" podUID="1a890e26-66fb-47d6-85dc-ae6b9045e4c6" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.790948 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerStarted","Data":"c02aecb6d6a10b922dd012c0afa260e77ec1f6235d79d3b4091e6047bd4cfe43"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.795130 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerStarted","Data":"ca151cd5c4b33ae42bfc4c0e5c2c1e710fe029a7e82e130a6242dcebf6fc55ef"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.798674 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" event={"ID":"8248a160-f606-4eaa-9bc1-0e7fcc1ab852","Type":"ContainerStarted","Data":"aab0eaf257561270c026586f9fcebd55f7aa21faeb13dcc952bcd3851016ee13"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.802986 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" podUID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.809166 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerStarted","Data":"b965b5e1fa58728147be1044b9386139df183d4e15806f44920aa165a6fcaa04"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.814417 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerStarted","Data":"8148be25fae41b0219446e9f3ad5695968837b84db9f8e560993b048b26f3101"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.819632 4903 pod_workers.go:1301] "Error syncing pod, skipping" 
err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.824724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerStarted","Data":"2c175da008b286a9a99e4abc8120ee8751db741a881482269e73ad5f6d03bbe2"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.827042 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.66:5001/openstack-k8s-operators/telemetry-operator:4d087536edb04db3b234f903236ee694881e7744\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.830446 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerStarted","Data":"932cef7c9f74f50c46921ff6e5772b4cff18524f91f1dd54822c32d9da5c343f"} Nov 26 22:38:51 crc kubenswrapper[4903]: I1126 22:38:51.834419 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerStarted","Data":"cb6a326a30788aeeb16011350ae80ce5eae6ac456b4d48b3af68bb00e2a76c04"} Nov 26 22:38:51 crc kubenswrapper[4903]: E1126 22:38:51.837485 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:38:52 crc kubenswrapper[4903]: I1126 22:38:52.271725 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.271967 4903 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.272013 4903 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert podName:b34e8bed-559a-49d6-b870-c375f36be49f nodeName:}" failed. No retries permitted until 2025-11-26 22:38:56.271998945 +0000 UTC m=+1064.962233855 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert") pod "infra-operator-controller-manager-57548d458d-tdlsw" (UID: "b34e8bed-559a-49d6-b870-c375f36be49f") : secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:52 crc kubenswrapper[4903]: I1126 22:38:52.677500 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.677816 4903 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.677884 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert podName:d4e9967e-dcf0-42c1-94fc-fea289ed54c2 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:56.677865242 +0000 UTC m=+1065.368100142 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" (UID: "d4e9967e-dcf0-42c1-94fc-fea289ed54c2") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.843864 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" podUID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.846221 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.66:5001/openstack-k8s-operators/telemetry-operator:4d087536edb04db3b234f903236ee694881e7744\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.846224 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.846322 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.846347 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:38:52 crc kubenswrapper[4903]: E1126 22:38:52.846417 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" podUID="1a890e26-66fb-47d6-85dc-ae6b9045e4c6" Nov 26 22:38:53 crc kubenswrapper[4903]: I1126 22:38:53.084653 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:53 crc kubenswrapper[4903]: I1126 22:38:53.084793 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:53 crc kubenswrapper[4903]: E1126 22:38:53.084895 4903 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 22:38:53 crc kubenswrapper[4903]: E1126 22:38:53.084972 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. 
No retries permitted until 2025-11-26 22:38:57.084953302 +0000 UTC m=+1065.775188212 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "webhook-server-cert" not found Nov 26 22:38:53 crc kubenswrapper[4903]: E1126 22:38:53.085013 4903 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 22:38:53 crc kubenswrapper[4903]: E1126 22:38:53.085085 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:38:57.085066695 +0000 UTC m=+1065.775301605 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "metrics-server-cert" not found Nov 26 22:38:56 crc kubenswrapper[4903]: I1126 22:38:56.349769 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:38:56 crc kubenswrapper[4903]: E1126 22:38:56.350114 4903 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:56 crc kubenswrapper[4903]: E1126 22:38:56.350230 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert podName:b34e8bed-559a-49d6-b870-c375f36be49f nodeName:}" failed. No retries permitted until 2025-11-26 22:39:04.350204143 +0000 UTC m=+1073.040439093 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert") pod "infra-operator-controller-manager-57548d458d-tdlsw" (UID: "b34e8bed-559a-49d6-b870-c375f36be49f") : secret "infra-operator-webhook-server-cert" not found Nov 26 22:38:56 crc kubenswrapper[4903]: I1126 22:38:56.757407 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:38:56 crc kubenswrapper[4903]: E1126 22:38:56.757591 4903 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:56 crc kubenswrapper[4903]: E1126 22:38:56.757663 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert podName:d4e9967e-dcf0-42c1-94fc-fea289ed54c2 nodeName:}" failed. 
No retries permitted until 2025-11-26 22:39:04.757645462 +0000 UTC m=+1073.447880382 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" (UID: "d4e9967e-dcf0-42c1-94fc-fea289ed54c2") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 22:38:57 crc kubenswrapper[4903]: I1126 22:38:57.164713 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:57 crc kubenswrapper[4903]: E1126 22:38:57.164920 4903 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 22:38:57 crc kubenswrapper[4903]: E1126 22:38:57.165808 4903 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 22:38:57 crc kubenswrapper[4903]: I1126 22:38:57.167318 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:38:57 crc kubenswrapper[4903]: E1126 22:38:57.167328 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:39:05.167301059 +0000 UTC m=+1073.857535979 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "webhook-server-cert" not found Nov 26 22:38:57 crc kubenswrapper[4903]: E1126 22:38:57.167589 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs podName:9239ccfa-cbaa-44b2-a70f-94a281d885f6 nodeName:}" failed. No retries permitted until 2025-11-26 22:39:05.167573317 +0000 UTC m=+1073.857808247 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs") pod "openstack-operator-controller-manager-5467d974c6-lpj77" (UID: "9239ccfa-cbaa-44b2-a70f-94a281d885f6") : secret "metrics-server-cert" not found
Nov 26 22:39:01 crc kubenswrapper[4903]: I1126 22:39:01.981484 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 22:39:01 crc kubenswrapper[4903]: I1126 22:39:01.983615 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 22:39:01 crc kubenswrapper[4903]: I1126 22:39:01.983883 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 22:39:01 crc kubenswrapper[4903]: I1126 22:39:01.984989 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 22:39:01 crc kubenswrapper[4903]: I1126 22:39:01.985319 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165" gracePeriod=600
Nov 26 22:39:02 crc kubenswrapper[4903]: I1126 22:39:02.977556 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165" exitCode=0
Nov 26 22:39:02 crc kubenswrapper[4903]: I1126 22:39:02.977607 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165"}
Nov 26 22:39:02 crc kubenswrapper[4903]: I1126 22:39:02.977652 4903 scope.go:117] "RemoveContainer" containerID="f36c7ac66ee1d12afd427e767b1119231b90a975fb7c25821f106b8b5f5dcac1"
Nov 26 22:39:03 crc kubenswrapper[4903]: E1126 22:39:03.993472 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:45ae665ce2ea81aef212ee402cb02693ee49001a7c88c40c9598ff2859b838a2"
Nov 26 22:39:03 crc kubenswrapper[4903]: E1126 22:39:03.994084 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:45ae665ce2ea81aef212ee402cb02693ee49001a7c88c40c9598ff2859b838a2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w6j99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:04 crc kubenswrapper[4903]: I1126 22:39:04.411495 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"
Nov 26 22:39:04 crc kubenswrapper[4903]: I1126 22:39:04.424323 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b34e8bed-559a-49d6-b870-c375f36be49f-cert\") pod \"infra-operator-controller-manager-57548d458d-tdlsw\" (UID: \"b34e8bed-559a-49d6-b870-c375f36be49f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"
Nov 26 22:39:04 crc kubenswrapper[4903]: I1126 22:39:04.453859 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"
Nov 26 22:39:04 crc kubenswrapper[4903]: E1126 22:39:04.652502 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677"
Nov 26 22:39:04 crc kubenswrapper[4903]: E1126 22:39:04.653012 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w9xdn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:04 crc kubenswrapper[4903]: I1126 22:39:04.816313 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"
Nov 26 22:39:04 crc kubenswrapper[4903]: I1126 22:39:04.823468 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4e9967e-dcf0-42c1-94fc-fea289ed54c2-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54\" (UID: \"d4e9967e-dcf0-42c1-94fc-fea289ed54c2\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.048211 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"
Nov 26 22:39:05 crc kubenswrapper[4903]: E1126 22:39:05.127983 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c"
Nov 26 22:39:05 crc kubenswrapper[4903]: E1126 22:39:05.128265 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-88lpk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.233548 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.233635 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.238015 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-metrics-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.238128 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9239ccfa-cbaa-44b2-a70f-94a281d885f6-webhook-certs\") pod \"openstack-operator-controller-manager-5467d974c6-lpj77\" (UID: \"9239ccfa-cbaa-44b2-a70f-94a281d885f6\") " pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:05 crc kubenswrapper[4903]: I1126 22:39:05.442014 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:06 crc kubenswrapper[4903]: E1126 22:39:06.070286 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7"
Nov 26 22:39:06 crc kubenswrapper[4903]: E1126 22:39:06.070462 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jzbd4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:07 crc kubenswrapper[4903]: E1126 22:39:07.187919 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b"
Nov 26 22:39:07 crc kubenswrapper[4903]: E1126 22:39:07.188911 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:ec4e5c911c1d0f1ea211a04b251a9d2e95b69d141c1caf07a0381693b2d6368b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9bxtm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-955677c94-rtztw_openstack-operators(63feada5-3911-469e-a0b1-539b7aa2948d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:07 crc kubenswrapper[4903]: E1126 22:39:07.865611 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4"
Nov 26 22:39:07 crc kubenswrapper[4903]: E1126 22:39:07.866881 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xtm22,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:08 crc kubenswrapper[4903]: E1126 22:39:08.785432 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711"
Nov 26 22:39:08 crc kubenswrapper[4903]: E1126 22:39:08.785888 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dtsk2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 22:39:16 crc kubenswrapper[4903]: I1126 22:39:16.968235 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"]
Nov 26 22:39:17 crc kubenswrapper[4903]: I1126 22:39:17.053368 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"]
Nov 26 22:39:17 crc kubenswrapper[4903]: I1126 22:39:17.058975 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"]
Nov 26 22:39:18 crc kubenswrapper[4903]: W1126 22:39:18.533438 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb34e8bed_559a_49d6_b870_c375f36be49f.slice/crio-63f042301dbba1ea4011bfe95ff861018f78ae0d258691b09d0fbbf4c5c30ac3 WatchSource:0}: Error finding container 63f042301dbba1ea4011bfe95ff861018f78ae0d258691b09d0fbbf4c5c30ac3: Status 404 returned error can't find the container with id 63f042301dbba1ea4011bfe95ff861018f78ae0d258691b09d0fbbf4c5c30ac3
Nov 26 22:39:19 crc kubenswrapper[4903]: I1126 22:39:19.156981 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerStarted","Data":"63f042301dbba1ea4011bfe95ff861018f78ae0d258691b09d0fbbf4c5c30ac3"}
Nov 26 22:39:19 crc kubenswrapper[4903]: I1126 22:39:19.158876 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" event={"ID":"d4e9967e-dcf0-42c1-94fc-fea289ed54c2","Type":"ContainerStarted","Data":"fd18533771fec71dbaee14e8a1bc5d30c917580209517c3587915a0956aef2e8"}
Nov 26 22:39:19 crc kubenswrapper[4903]: I1126 22:39:19.160160 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" event={"ID":"9239ccfa-cbaa-44b2-a70f-94a281d885f6","Type":"ContainerStarted","Data":"63c385c5689f6fbbb028651e0a459f9b797908dc7085e41475d7f4cba04ee5c6"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.169834 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerStarted","Data":"4cd2963ba045d645dcb7110139a8ae24a4d2fd2140d7dd107404d60e2e8b8984"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.170855 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerStarted","Data":"0e45631332036649004a30569555be5e4bd132014e104ec35168613bbe348c66"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.172515 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerStarted","Data":"2e7dfc9aa7196f1c2148e674ded6c2df43849959ea97e30165028a9eafef25a7"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.173779 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerStarted","Data":"a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.175519 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerStarted","Data":"78b29c82490aeae36542576b3c953a3bd4910d210c60ee88b03fe025f8a92307"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.176766 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerStarted","Data":"96c82dc1c33a0092972e9f05f6fbe9199dc6b53a530f909a2e37f78460fe4b7e"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.179261 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerStarted","Data":"b2b2d84dd240cab1a6be339205b067a6324f0903f14464e14364fb9e5a831c01"}
Nov 26 22:39:20 crc kubenswrapper[4903]: I1126 22:39:20.184453 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302"}
Nov 26 22:39:23 crc kubenswrapper[4903]: E1126 22:39:23.839735 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c"
Nov 26 22:39:23 crc kubenswrapper[4903]: E1126 22:39:23.869774 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56"
Nov 26 22:39:23 crc kubenswrapper[4903]: E1126 22:39:23.908607 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b"
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.271576 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerStarted","Data":"8f387160167c450d6022dd0915096e662c6cef237217b5aaaf59921b82ba050d"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.275833 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" event={"ID":"1a890e26-66fb-47d6-85dc-ae6b9045e4c6","Type":"ContainerStarted","Data":"ddcc6f06f473530bca47bc249215ac04819ba47c6da30c7a7308f6cbfd86e75f"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.279015 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerStarted","Data":"3c94ced101570c9af35cd73d7d22b0b61b6d34a120764fc28d341cb1830af1de"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.292635 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" event={"ID":"9239ccfa-cbaa-44b2-a70f-94a281d885f6","Type":"ContainerStarted","Data":"2df00e35d27059d34ff1d2f494351a367b0de5e3e774f99a24d52aa17250d1cd"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.293831 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.303833 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerStarted","Data":"945351004a76a1789d76cc7e0f94ab0849c4eade026d733984e968bd05c78a46"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.310159 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerStarted","Data":"52cfa243f71cb599a40418e35b37db435302c62358b8f9892e331f680abe3676"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.313739 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" event={"ID":"8248a160-f606-4eaa-9bc1-0e7fcc1ab852","Type":"ContainerStarted","Data":"a65314aa19c0b365aa18ec5534e98902b4078919be888afef02c8df601451f5e"}
Nov 26 22:39:24 crc kubenswrapper[4903]: E1126 22:39:24.322314 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268"
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.324213 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerStarted","Data":"0279a052fce9b1876ae50c8a21fe6a2923745fec884a89fbcc467f50eed11ce4"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.334744 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerStarted","Data":"fabbb040c796ce55d9636c6dabe59f8527dc2b9e084baa50d784f1be11e0e2e9"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.336550 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" podStartSLOduration=35.336528518 podStartE2EDuration="35.336528518s" podCreationTimestamp="2025-11-26 22:38:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:39:24.328749822 +0000 UTC m=+1093.018984732" watchObservedRunningTime="2025-11-26 22:39:24.336528518 +0000 UTC m=+1093.026763428"
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.340599 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerStarted","Data":"7ec669adb1f6ac9e1a2ccfabf07f67a13401b092148b962e90f2cb9c3a9dd9ee"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.350775 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerStarted","Data":"f859c5f30dadd17c93d03aa5a42a827d76b3509fa8dad93b31807899627ad0da"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.355812 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerStarted","Data":"288bd314ce3deb5913cf3f356646030d190fc91c10dc8940d7e10b3f64644580"}
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.356201 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" podStartSLOduration=6.930748527 podStartE2EDuration="35.35618098s" podCreationTimestamp="2025-11-26 22:38:49 +0000 UTC" firstStartedPulling="2025-11-26 22:38:51.010971732 +0000 UTC m=+1059.701206642" lastFinishedPulling="2025-11-26 22:39:19.436404185 +0000 UTC m=+1088.126639095" observedRunningTime="2025-11-26 22:39:24.35054496 +0000 UTC m=+1093.040779870" watchObservedRunningTime="2025-11-26 22:39:24.35618098 +0000 UTC m=+1093.046415890"
Nov 26 22:39:24 crc kubenswrapper[4903]: I1126 22:39:24.357475 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" event={"ID":"d4e9967e-dcf0-42c1-94fc-fea289ed54c2","Type":"ContainerStarted","Data":"03b8de7e39cebb6aea4e1ecbf20184a5bdd9a63361f3dd0575cb70d140321eda"}
Nov 26 22:39:24 crc kubenswrapper[4903]: E1126 22:39:24.931953 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900"
Nov 26 22:39:24 crc kubenswrapper[4903]: E1126 22:39:24.938490 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" podUID="63feada5-3911-469e-a0b1-539b7aa2948d"
Nov 26 22:39:24 crc kubenswrapper[4903]: E1126 22:39:24.953928 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.373499 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerStarted","Data":"5caa5dd3b88e9a45387741a2cf99415a75441df74a7012a22d889a891a78c9de"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.374456 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.399274 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerStarted","Data":"b691adbcad174be2f8ac84ecf135890d8ff70504f4f60f010e91ae9aacfd14aa"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.399834 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podStartSLOduration=11.863875061 podStartE2EDuration="37.399818941s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:51.012539114 +0000 UTC m=+1059.702774024" lastFinishedPulling="2025-11-26 22:39:16.548482984 +0000 UTC m=+1085.238717904" observedRunningTime="2025-11-26 22:39:25.391968502 +0000 UTC m=+1094.082203412" watchObservedRunningTime="2025-11-26 22:39:25.399818941 +0000 UTC m=+1094.090053851"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.438045 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerStarted","Data":"855b162bf2dded08384b194c455501f194673fa7aa287fa133c83e101e5b9de3"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.439124 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.444069 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.471901 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerStarted","Data":"3eb1497e74846a694b0ccf15c6f4474a55b4afd90b5df5bc7b16a4bc6fdb7c1b"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.472644 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.486188 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerStarted","Data":"7f5527276adbc1ccbf4d9cec1e076c31d179155bc086f10d0dd3dbda2956b6d6"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.486939 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.487142 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.492845 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerStarted","Data":"e3443c4c7e709064f53dd1264dbc7c3d7295e0c4488df7f409c294c5f5195d1d"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.495298 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.498530 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podStartSLOduration=4.801560776 podStartE2EDuration="37.498508921s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.724479985 +0000 UTC m=+1059.414714895" lastFinishedPulling="2025-11-26 22:39:23.42142813 +0000 UTC m=+1092.111663040" observedRunningTime="2025-11-26 22:39:25.491054444 +0000 UTC m=+1094.181289354" watchObservedRunningTime="2025-11-26 22:39:25.498508921 +0000 UTC m=+1094.188743821"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.499357 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.520925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerStarted","Data":"bcad912131e09f383bce43bea8b024f949c83e5043c528b8bd644f1c5a0858e0"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.522957 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.528031 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podStartSLOduration=5.080962705 podStartE2EDuration="37.528009244s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.974249526 +0000 UTC m=+1059.664484436" lastFinishedPulling="2025-11-26 22:39:23.421296055 +0000 UTC m=+1092.111530975" observedRunningTime="2025-11-26 22:39:25.52143704 +0000 UTC m=+1094.211671950" watchObservedRunningTime="2025-11-26 22:39:25.528009244 +0000 UTC m=+1094.218244154"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.535943 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.540783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" event={"ID":"d4e9967e-dcf0-42c1-94fc-fea289ed54c2","Type":"ContainerStarted","Data":"1e60edb69063f3e11bbd5a071808d7804d7bfe3464debd636a5d99bc7d9b4b04"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.540844 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.554682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerStarted","Data":"85fe7d3107d15234a270a5b05474d4ceec019c15ddcfbdbb0377295c2b0cded1"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.557293 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podStartSLOduration=9.113590954 podStartE2EDuration="37.557275572s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.998136991 +0000 UTC m=+1059.688371901" lastFinishedPulling="2025-11-26 22:39:19.441821609 +0000 UTC m=+1088.132056519" observedRunningTime="2025-11-26 22:39:25.546110855 +0000 UTC m=+1094.236345765" watchObservedRunningTime="2025-11-26 22:39:25.557275572 +0000 UTC m=+1094.247510482"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.566123 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" event={"ID":"1a890e26-66fb-47d6-85dc-ae6b9045e4c6","Type":"ContainerStarted","Data":"0ba8eeda1763ec9cdbde400b4400d91db0636e15126033b241383bb989a2cc0e"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.567835 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.572563 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerStarted","Data":"9a42602398d20264c197081a2a98c0e89e45ed54d1eaaa8d64b507428fc96ccd"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.574181 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.575444 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerStarted","Data":"605c7547f0f6969976e9fa452b09472d12672ddaf96bc75cae7abb918fbac29f"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.583754 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.588095 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" podStartSLOduration=32.869380996 podStartE2EDuration="37.58807902s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:39:18.541284068 +0000 UTC m=+1087.231518988" lastFinishedPulling="2025-11-26 22:39:23.259982082 +0000 UTC m=+1091.950217012" observedRunningTime="2025-11-26 22:39:25.582113981 +0000 UTC m=+1094.272348891" watchObservedRunningTime="2025-11-26 22:39:25.58807902 +0000 UTC m=+1094.278313930"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.597210 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerStarted","Data":"f60e0ef51bbbcfc1d2e7f6531b79cabb4ffd72a207a84752706f7deea5a796d7"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.598185 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.606358 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.610777 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podStartSLOduration=3.83081088 podStartE2EDuration="37.610755891s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:49.659136917 +0000 UTC m=+1058.349371827" lastFinishedPulling="2025-11-26 22:39:23.439081928 +0000 UTC m=+1092.129316838" observedRunningTime="2025-11-26 22:39:25.604087684 +0000 UTC m=+1094.294322594" watchObservedRunningTime="2025-11-26 22:39:25.610755891 +0000 UTC m=+1094.300990801"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.620915 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerStarted","Data":"458d6d36609a29c09b3d855cad8faa596fc1e945d6920dca3a6681963dbd61bc"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.636469 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerStarted","Data":"e869d22f8efcd326ec8bb82bfbacf68f46967bf99cfc8f634fdf29f5381bf4d5"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.637244 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.644955 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podStartSLOduration=4.894617398 podStartE2EDuration="37.64493901s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.676934872 +0000 UTC m=+1059.367169782" lastFinishedPulling="2025-11-26 22:39:23.427256474 +0000 UTC m=+1092.117491394" observedRunningTime="2025-11-26 22:39:25.639011092 +0000 UTC m=+1094.329245992" watchObservedRunningTime="2025-11-26 22:39:25.64493901 +0000 UTC m=+1094.335173920"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.652935 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerStarted","Data":"c271b7e120c20244e09dcf560c88ffdb4ff593c9b70da725534280e8f49b52af"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.653974 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.668862 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerStarted","Data":"b143ede0dab1ba8c8efe2e66d1ecf96f670fddc08453252bf52c72782c4ee54e"}
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.668914 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.672684 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podStartSLOduration=3.992645076 podStartE2EDuration="37.672673685s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:49.674588097 +0000 UTC m=+1058.364823007" lastFinishedPulling="2025-11-26 22:39:23.354616696 +0000 UTC m=+1092.044851616" observedRunningTime="2025-11-26 22:39:25.671862264 +0000 UTC m=+1094.362097174" watchObservedRunningTime="2025-11-26 22:39:25.672673685 +0000 UTC m=+1094.362908595"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.745460 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podStartSLOduration=5.037986083 podStartE2EDuration="37.745443017s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.714674704 +0000 UTC m=+1059.404909614" lastFinishedPulling="2025-11-26 22:39:23.422131638 +0000 UTC m=+1092.112366548" observedRunningTime="2025-11-26 22:39:25.735715289 +0000 UTC m=+1094.425950199" watchObservedRunningTime="2025-11-26 22:39:25.745443017 +0000 UTC m=+1094.435677917"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.844311 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podStartSLOduration=5.128966501 podStartE2EDuration="37.844287203s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.708413438 +0000 UTC m=+1059.398648348" lastFinishedPulling="2025-11-26 22:39:23.42373414 +0000 UTC m=+1092.113969050" observedRunningTime="2025-11-26 22:39:25.763288222 +0000 UTC m=+1094.453523132" watchObservedRunningTime="2025-11-26 22:39:25.844287203 +0000 UTC m=+1094.534522103"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.856106 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podStartSLOduration=11.66691363 podStartE2EDuration="37.856091225s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:51.005739113 +0000 UTC m=+1059.695974023" lastFinishedPulling="2025-11-26 22:39:17.194916708 +0000 UTC m=+1085.885151618" observedRunningTime="2025-11-26 22:39:25.851795902 +0000 UTC m=+1094.542030812" watchObservedRunningTime="2025-11-26 22:39:25.856091225 +0000 UTC m=+1094.546326135"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.878241 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" podStartSLOduration=12.352122624 podStartE2EDuration="37.878221243s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:51.023548076 +0000 UTC m=+1059.713782986" lastFinishedPulling="2025-11-26 22:39:16.549646685 +0000 UTC m=+1085.239881605" observedRunningTime="2025-11-26 22:39:25.871260808 +0000 UTC m=+1094.561495718" watchObservedRunningTime="2025-11-26 22:39:25.878221243 +0000 UTC m=+1094.568456153"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.895279 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podStartSLOduration=12.336424208 podStartE2EDuration="37.895262466s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.989356598 +0000 UTC m=+1059.679591498" lastFinishedPulling="2025-11-26 22:39:16.548194856 +0000 UTC m=+1085.238429756" observedRunningTime="2025-11-26 22:39:25.892318788 +0000 UTC m=+1094.582553708" watchObservedRunningTime="2025-11-26 22:39:25.895262466 +0000 UTC m=+1094.585497376"
Nov 26 22:39:25 crc kubenswrapper[4903]: I1126 22:39:25.940845 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" podStartSLOduration=33.254363527 podStartE2EDuration="37.940826216s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:39:18.541420341 +0000 UTC m=+1087.231655291" lastFinishedPulling="2025-11-26 22:39:23.22788306 +0000 UTC m=+1091.918117980" observedRunningTime="2025-11-26 22:39:25.932948646 +0000 UTC m=+1094.623183556" watchObservedRunningTime="2025-11-26 22:39:25.940826216 +0000 UTC m=+1094.631061126"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.672600 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerStarted","Data":"7ec228a4a74f23af90bbdfabddbd0256ae9bf23a9046b007711242d44d17e32b"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.674357 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerStarted","Data":"5f2f48bbdbfbd4e10f2d62fb208a61cababa73e65d09327a3127865af38185d9"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.674475 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.675858 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerStarted","Data":"8beb6b6cf96c4e36ef6da37a31eef6c5e78e2d0b7f34dbf7a4ff45693dd1b468"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.676671 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.685058 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerStarted","Data":"822d7862abecb0b1fc1838b71d4c28f8aaeb12e03f49f915cbe9e3f1c99e470e"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.685237 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.686982 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerStarted","Data":"b0c82492969d9ad2b32b84c61b0c3510a7d57217a9f52034e2506cd6c78405d8"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.687189 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.688980 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerStarted","Data":"591fc07ae4101cbae3b0de0f9156300eada80b98d575b5d0d91fefc032dc50cb"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.689077 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.690791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerStarted","Data":"46eafe262e8750d44bb32c6594ce8ba245e7fb89e08540b2174623a5fd00f013"}
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.691764 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.697297 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.708888 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podStartSLOduration=3.352009607 podStartE2EDuration="38.708874089s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.699504051 +0000 UTC m=+1059.389738961" lastFinishedPulling="2025-11-26 22:39:26.056368533 +0000 UTC m=+1094.746603443" observedRunningTime="2025-11-26 22:39:26.70135688 +0000 UTC m=+1095.391591790" watchObservedRunningTime="2025-11-26 22:39:26.708874089 +0000 UTC m=+1095.399108999"
Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.730622 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podStartSLOduration=3.439029178 podStartE2EDuration="38.730599367s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.676888791 +0000 UTC m=+1059.367123701" lastFinishedPulling="2025-11-26 22:39:25.96845898 +0000 UTC m=+1094.658693890" observedRunningTime="2025-11-26 22:39:26.722168383 +0000 UTC m=+1095.412403293" watchObservedRunningTime="2025-11-26 22:39:26.730599367 +0000 UTC m=+1095.420834277" Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.759452 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" podStartSLOduration=3.2647668899999998 podStartE2EDuration="38.759429532s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.748835641 +0000 UTC m=+1059.439070551" lastFinishedPulling="2025-11-26 22:39:26.243498283 +0000 UTC m=+1094.933733193" observedRunningTime="2025-11-26 22:39:26.75749025 +0000 UTC m=+1095.447725180" watchObservedRunningTime="2025-11-26 22:39:26.759429532 +0000 UTC m=+1095.449664442" Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.815013 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podStartSLOduration=3.412305088 podStartE2EDuration="38.814992847s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.758347234 +0000 UTC m=+1059.448582144" lastFinishedPulling="2025-11-26 22:39:26.161034993 +0000 UTC m=+1094.851269903" observedRunningTime="2025-11-26 22:39:26.795470179 +0000 UTC m=+1095.485705089" watchObservedRunningTime="2025-11-26 22:39:26.814992847 +0000 UTC m=+1095.505227757" Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.835117 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podStartSLOduration=4.219973495 podStartE2EDuration="38.835101192s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.724629489 +0000 UTC m=+1059.414864399" lastFinishedPulling="2025-11-26 22:39:25.339757186 +0000 UTC m=+1094.029992096" observedRunningTime="2025-11-26 22:39:26.826136773 +0000 UTC m=+1095.516371683" watchObservedRunningTime="2025-11-26 22:39:26.835101192 +0000 UTC m=+1095.525336102" Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.848763 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podStartSLOduration=4.686915402 podStartE2EDuration="38.848746533s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:51.177979547 +0000 UTC m=+1059.868214457" lastFinishedPulling="2025-11-26 22:39:25.339810688 +0000 UTC m=+1094.030045588" observedRunningTime="2025-11-26 22:39:26.84409674 +0000 UTC m=+1095.534331650" watchObservedRunningTime="2025-11-26 22:39:26.848746533 +0000 UTC m=+1095.538981443" Nov 26 22:39:26 crc kubenswrapper[4903]: I1126 22:39:26.866273 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podStartSLOduration=3.919174247 
podStartE2EDuration="38.866239648s" podCreationTimestamp="2025-11-26 22:38:48 +0000 UTC" firstStartedPulling="2025-11-26 22:38:50.67682919 +0000 UTC m=+1059.367064100" lastFinishedPulling="2025-11-26 22:39:25.623894591 +0000 UTC m=+1094.314129501" observedRunningTime="2025-11-26 22:39:26.860814544 +0000 UTC m=+1095.551049464" watchObservedRunningTime="2025-11-26 22:39:26.866239648 +0000 UTC m=+1095.556474558" Nov 26 22:39:27 crc kubenswrapper[4903]: I1126 22:39:27.701575 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:39:27 crc kubenswrapper[4903]: I1126 22:39:27.701621 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:39:29 crc kubenswrapper[4903]: I1126 22:39:29.090517 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:39:29 crc kubenswrapper[4903]: I1126 22:39:29.284770 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:39:29 crc kubenswrapper[4903]: I1126 22:39:29.405727 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:39:29 crc kubenswrapper[4903]: I1126 22:39:29.609433 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:39:29 crc kubenswrapper[4903]: I1126 22:39:29.614582 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:39:34 crc kubenswrapper[4903]: I1126 22:39:34.467018 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:39:35 crc kubenswrapper[4903]: I1126 22:39:35.058634 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:39:35 crc kubenswrapper[4903]: I1126 22:39:35.451424 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:39:38 crc kubenswrapper[4903]: I1126 22:39:38.721452 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:39:38 crc kubenswrapper[4903]: I1126 22:39:38.744607 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:39:38 crc kubenswrapper[4903]: I1126 22:39:38.813687 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:39:38 crc kubenswrapper[4903]: I1126 22:39:38.900159 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:39:39 crc kubenswrapper[4903]: I1126 22:39:39.026270 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:39:39 crc kubenswrapper[4903]: I1126 22:39:39.095375 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:39:39 crc kubenswrapper[4903]: I1126 22:39:39.543797 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.154237 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.159139 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.162113 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.162358 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.162527 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.162824 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-4g2ls" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.166504 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.237824 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.239129 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.248122 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.248942 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.300582 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt2tj\" (UniqueName: \"kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.300650 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.402520 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt2tj\" (UniqueName: \"kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.402583 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.402611 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.402668 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.402770 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmfkh\" (UniqueName: \"kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.403764 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 
22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.425406 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt2tj\" (UniqueName: \"kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj\") pod \"dnsmasq-dns-675f4bcbfc-lqctt\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.504597 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmfkh\" (UniqueName: \"kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.504963 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.505678 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.505813 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.506719 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.515935 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.533872 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmfkh\" (UniqueName: \"kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh\") pod \"dnsmasq-dns-78dd6ddcc-s4l25\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:02 crc kubenswrapper[4903]: I1126 22:40:02.567532 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:03 crc kubenswrapper[4903]: I1126 22:40:03.013283 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:03 crc kubenswrapper[4903]: I1126 22:40:03.080555 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:03 crc kubenswrapper[4903]: W1126 22:40:03.081025 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d5d45d8_1f13_4a89_8c86_573963d436ec.slice/crio-d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80 WatchSource:0}: Error finding container d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80: Status 404 returned error can't find the container with id d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80 Nov 26 22:40:03 crc kubenswrapper[4903]: I1126 22:40:03.104132 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" event={"ID":"0d5d45d8-1f13-4a89-8c86-573963d436ec","Type":"ContainerStarted","Data":"d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80"} Nov 26 22:40:03 crc kubenswrapper[4903]: I1126 22:40:03.105562 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" event={"ID":"b5fa15be-adee-4af2-a008-61e95b025d3a","Type":"ContainerStarted","Data":"a50c047cee1b24f317930f171f76791942cdcac0e5adc2e07e2c571603888d68"} Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.034193 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.050198 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.051659 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.073482 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.176144 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.176457 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.176543 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psvf9\" (UniqueName: \"kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.279892 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.279938 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.279997 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psvf9\" (UniqueName: \"kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.282745 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.283582 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.325096 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psvf9\" (UniqueName: 
\"kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9\") pod \"dnsmasq-dns-666b6646f7-jg7s2\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") " pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.380142 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.393945 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.477798 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.479432 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.515159 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"] Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.589008 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.589275 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.589317 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-898g2\" (UniqueName: \"kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.691043 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.691098 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-898g2\" (UniqueName: \"kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.691214 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.692206 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.692653 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.717462 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-898g2\" (UniqueName: \"kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2\") pod \"dnsmasq-dns-57d769cc4f-b82qc\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:05 crc kubenswrapper[4903]: I1126 22:40:05.853919 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.060122 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"] Nov 26 22:40:06 crc kubenswrapper[4903]: W1126 22:40:06.075058 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f090a22_bd8a_4d9c_a477_c8b80664bbc5.slice/crio-6c80308d4053bce66fc2ab5192d822e30656c64c44104858218a91dbf67c8aea WatchSource:0}: Error finding container 6c80308d4053bce66fc2ab5192d822e30656c64c44104858218a91dbf67c8aea: Status 404 returned error can't find the container with id 6c80308d4053bce66fc2ab5192d822e30656c64c44104858218a91dbf67c8aea Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.153956 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" event={"ID":"7f090a22-bd8a-4d9c-a477-c8b80664bbc5","Type":"ContainerStarted","Data":"6c80308d4053bce66fc2ab5192d822e30656c64c44104858218a91dbf67c8aea"} Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.170216 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.171949 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.178057 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.180981 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.181038 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.181056 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.181141 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.181398 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.181548 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xmp9c" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.219824 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.286502 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"] Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320393 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320463 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320485 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320530 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320557 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320576 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fg9r\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320610 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320673 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320731 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320763 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.320805 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422086 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422141 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422176 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422205 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422225 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fg9r\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422256 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422273 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422313 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422342 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422357 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.422389 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.424227 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.424245 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.424773 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.425381 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.425416 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.426675 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.430678 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.431954 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.433122 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.440421 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fg9r\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.441190 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.464888 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc
kubenswrapper[4903]: I1126 22:40:06.497342 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.499625 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.503919 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.510166 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.510443 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.510664 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.511039 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.511524 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qhzgz" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.511651 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.512235 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.514509 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630369 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630447 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630467 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630679 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630793 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thc2d\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630926 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.630973 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.631008 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.631034 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.631051 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.631078 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733035 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733396 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733420 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733453 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733475 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thc2d\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733509 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733535 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733555 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733574 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733590 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.733624 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.735009 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.735285 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.735581 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.736158 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.736355 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.737138 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.739132 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.740865 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.740911 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.741560 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 
22:40:06.753382 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thc2d\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.774823 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:06 crc kubenswrapper[4903]: I1126 22:40:06.900478 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:40:07 crc kubenswrapper[4903]: I1126 22:40:07.045438 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:40:07 crc kubenswrapper[4903]: I1126 22:40:07.162058 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" event={"ID":"0c6654bc-c2b5-45a0-93ab-c338beb90d3c","Type":"ContainerStarted","Data":"4bc2a786659e06bac65141c0e9320203f67e6daa8bbfe28433289e7e76393487"} Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.095795 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.097559 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.103606 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.103867 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.103605 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-4mfcj" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.103770 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.108063 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.109612 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.162730 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163002 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4dbs\" (UniqueName: \"kubernetes.io/projected/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kube-api-access-c4dbs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163040 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163071 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-default\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163145 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kolla-config\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163166 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163210 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.163271 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.264599 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.264881 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4dbs\" (UniqueName: \"kubernetes.io/projected/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kube-api-access-c4dbs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.264992 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.265074 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-default\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.265186 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kolla-config\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.265264 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.265343 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.265431 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.266171 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.266363 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kolla-config\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.266432 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.267019 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-config-data-default\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.267288 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc 
kubenswrapper[4903]: I1126 22:40:08.284033 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.285513 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.305343 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4dbs\" (UniqueName: \"kubernetes.io/projected/b1969a76-48dc-4a53-8ee9-f9b5a5670e30-kube-api-access-c4dbs\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.329271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b1969a76-48dc-4a53-8ee9-f9b5a5670e30\") " pod="openstack/openstack-galera-0" Nov 26 22:40:08 crc kubenswrapper[4903]: I1126 22:40:08.497048 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.643268 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.646287 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.649872 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-pd465" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.650380 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.650782 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.654947 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.659799 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793292 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793364 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793422 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793453 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793487 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g7tl\" (UniqueName: \"kubernetes.io/projected/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kube-api-access-7g7tl\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793532 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793596 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.793658 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.794342 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.795477 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.798528 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-f8gjd" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.799002 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.799303 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.812734 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895661 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895727 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-config-data\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895752 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895777 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk96b\" (UniqueName: \"kubernetes.io/projected/6fd08b11-1328-47a3-82a3-286d70df4394-kube-api-access-rk96b\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895799 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g7tl\" (UniqueName: \"kubernetes.io/projected/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kube-api-access-7g7tl\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.895982 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896041 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-kolla-config\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896069 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896168 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896314 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896339 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896426 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896558 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.896615 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.897038 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.897264 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.897320 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.898373 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.902429 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.904245 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.917326 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g7tl\" (UniqueName: \"kubernetes.io/projected/aabcbcd0-4cc0-495d-b059-6b8722c47aa1-kube-api-access-7g7tl\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.923394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"aabcbcd0-4cc0-495d-b059-6b8722c47aa1\") " pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.997767 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk96b\" (UniqueName: \"kubernetes.io/projected/6fd08b11-1328-47a3-82a3-286d70df4394-kube-api-access-rk96b\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.998251 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-kolla-config\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 
26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.998323 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.998445 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.998604 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-config-data\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.999022 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-kolla-config\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:09 crc kubenswrapper[4903]: I1126 22:40:09.999362 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6fd08b11-1328-47a3-82a3-286d70df4394-config-data\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.004591 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.004969 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fd08b11-1328-47a3-82a3-286d70df4394-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.008786 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.017148 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk96b\" (UniqueName: \"kubernetes.io/projected/6fd08b11-1328-47a3-82a3-286d70df4394-kube-api-access-rk96b\") pod \"memcached-0\" (UID: \"6fd08b11-1328-47a3-82a3-286d70df4394\") " pod="openstack/memcached-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.108702 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 22:40:10 crc kubenswrapper[4903]: I1126 22:40:10.218958 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a97b1b29-2461-47c7-a3f9-71837fe03413","Type":"ContainerStarted","Data":"affb2988258f7d0b7bc50347170cbab5ade5832c1bbc5cc8a914c76e6f8415d0"} Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.069969 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.072910 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.072992 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.074573 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gn24s" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.155202 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k26s4\" (UniqueName: \"kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4\") pod \"kube-state-metrics-0\" (UID: \"f58d4082-e69c-44e2-9961-9842cb738869\") " pod="openstack/kube-state-metrics-0" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.260831 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k26s4\" (UniqueName: \"kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4\") pod \"kube-state-metrics-0\" (UID: \"f58d4082-e69c-44e2-9961-9842cb738869\") " pod="openstack/kube-state-metrics-0" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.303756 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k26s4\" (UniqueName: \"kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4\") pod \"kube-state-metrics-0\" (UID: \"f58d4082-e69c-44e2-9961-9842cb738869\") " pod="openstack/kube-state-metrics-0" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.453711 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.751010 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6"] Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.752538 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.755533 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-tx8t5" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.755727 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.768384 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6"] Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.874864 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kbwq\" (UniqueName: \"kubernetes.io/projected/dff09e4b-a38e-43fa-8394-e6922e356c4d-kube-api-access-2kbwq\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.874975 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.976483 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kbwq\" (UniqueName: \"kubernetes.io/projected/dff09e4b-a38e-43fa-8394-e6922e356c4d-kube-api-access-2kbwq\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:12 crc kubenswrapper[4903]: I1126 22:40:12.976569 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:12 crc kubenswrapper[4903]: E1126 22:40:12.976755 4903 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Nov 26 22:40:12 crc kubenswrapper[4903]: E1126 22:40:12.976801 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert podName:dff09e4b-a38e-43fa-8394-e6922e356c4d nodeName:}" failed. No retries permitted until 2025-11-26 22:40:13.476785488 +0000 UTC m=+1142.167020398 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert") pod "observability-ui-dashboards-7d5fb4cbfb-ccgq6" (UID: "dff09e4b-a38e-43fa-8394-e6922e356c4d") : secret "observability-ui-dashboards" not found Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.000669 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kbwq\" (UniqueName: \"kubernetes.io/projected/dff09e4b-a38e-43fa-8394-e6922e356c4d-kube-api-access-2kbwq\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.080245 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7f98674c46-g2nz8"] Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.081963 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.093567 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f98674c46-g2nz8"] Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.180922 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-oauth-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181170 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2sls\" (UniqueName: \"kubernetes.io/projected/846f1e9e-ba63-4012-a677-d3732a751ada-kube-api-access-f2sls\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181265 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-oauth-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181446 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-console-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181547 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181678 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-trusted-ca-bundle\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.181888 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-service-ca\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.216057 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.218188 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.221227 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.221397 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.221549 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jvntk" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.224162 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.224522 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.228310 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.228852 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284292 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-service-ca\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284349 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-oauth-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284370 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2sls\" (UniqueName: \"kubernetes.io/projected/846f1e9e-ba63-4012-a677-d3732a751ada-kube-api-access-f2sls\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284400 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-oauth-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284448 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-console-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284469 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.284534 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-trusted-ca-bundle\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.285171 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-service-ca\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.285243 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-oauth-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.286083 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-console-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.286394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/846f1e9e-ba63-4012-a677-d3732a751ada-trusted-ca-bundle\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.293931 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-serving-cert\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.299250 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/846f1e9e-ba63-4012-a677-d3732a751ada-console-oauth-config\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.304953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2sls\" (UniqueName: \"kubernetes.io/projected/846f1e9e-ba63-4012-a677-d3732a751ada-kube-api-access-f2sls\") pod \"console-7f98674c46-g2nz8\" (UID: \"846f1e9e-ba63-4012-a677-d3732a751ada\") " pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387144 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387247 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387283 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387302 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387343 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387626 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387716 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn9bq\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " 
pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.387743 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.410056 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f98674c46-g2nz8" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.489782 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490503 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490559 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490586 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490641 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490769 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490804 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn9bq\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 
22:40:13.490827 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.490869 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.491442 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.494993 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff09e4b-a38e-43fa-8394-e6922e356c4d-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-ccgq6\" (UID: \"dff09e4b-a38e-43fa-8394-e6922e356c4d\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.495176 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.495332 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.495367 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/381a5ecb46bedd79ebe106a71ed6a8c447ce6be192d3691459f97b3265cfc441/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.495459 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.495622 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.503665 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.513455 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn9bq\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.517142 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.527833 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.535185 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 26 22:40:13 crc kubenswrapper[4903]: I1126 22:40:13.687799 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.194611 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-kzb8j"]
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.196236 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.198459 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-wvnqm"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.199754 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.201194 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.205557 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-mv2r4"]
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.208001 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.224195 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-mv2r4"]
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.233422 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kzb8j"]
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.255253 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-log-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.255299 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-combined-ca-bundle\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.255322 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-ovn-controller-tls-certs\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.257093 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zpck\" (UniqueName: \"kubernetes.io/projected/1aa29ea2-aaab-435e-9995-41a5f137be03-kube-api-access-7zpck\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.257133 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.257281 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1aa29ea2-aaab-435e-9995-41a5f137be03-scripts\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.257453 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.358949 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-log\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359023 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-run\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359067 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bfc848b9-8183-4fb5-b8ce-d9542294079f-scripts\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359092 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-lib\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359160 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-log-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359237 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkx9x\" (UniqueName: \"kubernetes.io/projected/bfc848b9-8183-4fb5-b8ce-d9542294079f-kube-api-access-pkx9x\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359263 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-combined-ca-bundle\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359290 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-ovn-controller-tls-certs\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359343 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zpck\" (UniqueName: \"kubernetes.io/projected/1aa29ea2-aaab-435e-9995-41a5f137be03-kube-api-access-7zpck\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359378 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359408 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1aa29ea2-aaab-435e-9995-41a5f137be03-scripts\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359468 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-etc-ovs\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.359495 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.360098 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.360260 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-run-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.361852 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1aa29ea2-aaab-435e-9995-41a5f137be03-var-log-ovn\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.362966 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1aa29ea2-aaab-435e-9995-41a5f137be03-scripts\") pod \"ovn-controller-kzb8j\" (UID:
\"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.365229 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-combined-ca-bundle\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.379932 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zpck\" (UniqueName: \"kubernetes.io/projected/1aa29ea2-aaab-435e-9995-41a5f137be03-kube-api-access-7zpck\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.380306 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1aa29ea2-aaab-435e-9995-41a5f137be03-ovn-controller-tls-certs\") pod \"ovn-controller-kzb8j\" (UID: \"1aa29ea2-aaab-435e-9995-41a5f137be03\") " pod="openstack/ovn-controller-kzb8j" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461348 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-log\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461406 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-run\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461432 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bfc848b9-8183-4fb5-b8ce-d9542294079f-scripts\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461450 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-lib\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461500 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkx9x\" (UniqueName: \"kubernetes.io/projected/bfc848b9-8183-4fb5-b8ce-d9542294079f-kube-api-access-pkx9x\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461574 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-etc-ovs\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.461898 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-etc-ovs\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.462094 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-run\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.462165 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-lib\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.466188 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bfc848b9-8183-4fb5-b8ce-d9542294079f-scripts\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.466332 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/bfc848b9-8183-4fb5-b8ce-d9542294079f-var-log\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.477833 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkx9x\" (UniqueName: \"kubernetes.io/projected/bfc848b9-8183-4fb5-b8ce-d9542294079f-kube-api-access-pkx9x\") pod \"ovn-controller-ovs-mv2r4\" (UID: \"bfc848b9-8183-4fb5-b8ce-d9542294079f\") " pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.516855 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kzb8j" Nov 26 22:40:16 crc kubenswrapper[4903]: I1126 22:40:16.535605 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.797160 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.798962 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.802108 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.802319 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.802547 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-kfg7g" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.802758 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.802848 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.808958 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929270 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929363 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929437 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpq4f\" (UniqueName: \"kubernetes.io/projected/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-kube-api-access-vpq4f\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929473 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-config\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929533 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929608 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929655 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:18 crc kubenswrapper[4903]: I1126 22:40:18.929721 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.006018 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.008153 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.010187 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.010298 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.010446 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-2jtg6" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.010699 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.015463 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031063 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpq4f\" (UniqueName: \"kubernetes.io/projected/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-kube-api-access-vpq4f\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031125 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-config\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031195 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031269 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031322 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") 
" pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031353 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031383 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031433 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.031973 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.032173 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.032785 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.033018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-config\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.037720 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.038403 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.039486 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-combined-ca-bundle\") pod 
\"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.063536 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpq4f\" (UniqueName: \"kubernetes.io/projected/ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e-kube-api-access-vpq4f\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.071138 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e\") " pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133575 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133644 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133754 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133779 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133827 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-config\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133868 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4286478b-1146-4f96-8819-753c3f6a6158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133890 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.133931 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-884gh\" (UniqueName: \"kubernetes.io/projected/4286478b-1146-4f96-8819-753c3f6a6158-kube-api-access-884gh\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.141026 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.235759 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.235882 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.235916 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236413 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-config\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236453 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4286478b-1146-4f96-8819-753c3f6a6158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236473 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236519 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-884gh\" (UniqueName: \"kubernetes.io/projected/4286478b-1146-4f96-8819-753c3f6a6158-kube-api-access-884gh\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236557 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.236904 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4286478b-1146-4f96-8819-753c3f6a6158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.237017 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.237713 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.237814 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4286478b-1146-4f96-8819-753c3f6a6158-config\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.240479 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.242324 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.242621 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4286478b-1146-4f96-8819-753c3f6a6158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.263605 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-884gh\" (UniqueName: \"kubernetes.io/projected/4286478b-1146-4f96-8819-753c3f6a6158-kube-api-access-884gh\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.267574 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4286478b-1146-4f96-8819-753c3f6a6158\") " pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:19 crc kubenswrapper[4903]: I1126 22:40:19.325359 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.649335 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.650460 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tt2tj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-lqctt_openstack(b5fa15be-adee-4af2-a008-61e95b025d3a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.651649 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" podUID="b5fa15be-adee-4af2-a008-61e95b025d3a" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.907263 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.907440 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d 
--hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jmfkh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-s4l25_openstack(0d5d45d8-1f13-4a89-8c86-573963d436ec): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:40:21 crc kubenswrapper[4903]: E1126 22:40:21.908854 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" podUID="0d5d45d8-1f13-4a89-8c86-573963d436ec" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.405579 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" event={"ID":"0d5d45d8-1f13-4a89-8c86-573963d436ec","Type":"ContainerDied","Data":"d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80"} Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.405810 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d48f77f861b2eb44c15c8a9b1e20b14918d1770377852fc7e0957af73d138f80" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.408154 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" event={"ID":"b5fa15be-adee-4af2-a008-61e95b025d3a","Type":"ContainerDied","Data":"a50c047cee1b24f317930f171f76791942cdcac0e5adc2e07e2c571603888d68"} Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.408178 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a50c047cee1b24f317930f171f76791942cdcac0e5adc2e07e2c571603888d68" Nov 26 22:40:23 crc kubenswrapper[4903]: 
I1126 22:40:23.503486 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.524145 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.647422 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tt2tj\" (UniqueName: \"kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj\") pod \"b5fa15be-adee-4af2-a008-61e95b025d3a\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.647884 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmfkh\" (UniqueName: \"kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh\") pod \"0d5d45d8-1f13-4a89-8c86-573963d436ec\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.647950 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config\") pod \"b5fa15be-adee-4af2-a008-61e95b025d3a\" (UID: \"b5fa15be-adee-4af2-a008-61e95b025d3a\") " Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.648651 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config" (OuterVolumeSpecName: "config") pod "b5fa15be-adee-4af2-a008-61e95b025d3a" (UID: "b5fa15be-adee-4af2-a008-61e95b025d3a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.648848 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc\") pod \"0d5d45d8-1f13-4a89-8c86-573963d436ec\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.648872 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config\") pod \"0d5d45d8-1f13-4a89-8c86-573963d436ec\" (UID: \"0d5d45d8-1f13-4a89-8c86-573963d436ec\") " Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.649294 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0d5d45d8-1f13-4a89-8c86-573963d436ec" (UID: "0d5d45d8-1f13-4a89-8c86-573963d436ec"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.649710 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config" (OuterVolumeSpecName: "config") pod "0d5d45d8-1f13-4a89-8c86-573963d436ec" (UID: "0d5d45d8-1f13-4a89-8c86-573963d436ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.649987 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.650007 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d5d45d8-1f13-4a89-8c86-573963d436ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.650017 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5fa15be-adee-4af2-a008-61e95b025d3a-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.653368 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh" (OuterVolumeSpecName: "kube-api-access-jmfkh") pod "0d5d45d8-1f13-4a89-8c86-573963d436ec" (UID: "0d5d45d8-1f13-4a89-8c86-573963d436ec"). InnerVolumeSpecName "kube-api-access-jmfkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.656789 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj" (OuterVolumeSpecName: "kube-api-access-tt2tj") pod "b5fa15be-adee-4af2-a008-61e95b025d3a" (UID: "b5fa15be-adee-4af2-a008-61e95b025d3a"). InnerVolumeSpecName "kube-api-access-tt2tj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.753461 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tt2tj\" (UniqueName: \"kubernetes.io/projected/b5fa15be-adee-4af2-a008-61e95b025d3a-kube-api-access-tt2tj\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.753500 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmfkh\" (UniqueName: \"kubernetes.io/projected/0d5d45d8-1f13-4a89-8c86-573963d436ec-kube-api-access-jmfkh\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.821099 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:40:23 crc kubenswrapper[4903]: I1126 22:40:23.977477 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.018475 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.027087 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: W1126 22:40:24.031933 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fd08b11_1328_47a3_82a3_286d70df4394.slice/crio-6c3e15d30bc50e53294584b972db35dac355b16fde094ac9fc01d0cb2bcd3bbc WatchSource:0}: Error finding container 6c3e15d30bc50e53294584b972db35dac355b16fde094ac9fc01d0cb2bcd3bbc: Status 404 returned error can't find the container with id 6c3e15d30bc50e53294584b972db35dac355b16fde094ac9fc01d0cb2bcd3bbc Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.420316 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b1969a76-48dc-4a53-8ee9-f9b5a5670e30","Type":"ContainerStarted","Data":"c08256cb5980e74228ca3b60f1885c3fa3baff9cf8f0be9daa356fe7052bbb0e"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.421718 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6fd08b11-1328-47a3-82a3-286d70df4394","Type":"ContainerStarted","Data":"6c3e15d30bc50e53294584b972db35dac355b16fde094ac9fc01d0cb2bcd3bbc"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.423067 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kzb8j"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.423094 4903 generic.go:334] "Generic (PLEG): container finished" podID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerID="7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494" exitCode=0 Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.423115 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" event={"ID":"0c6654bc-c2b5-45a0-93ab-c338beb90d3c","Type":"ContainerDied","Data":"7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.426552 4903 generic.go:334] "Generic (PLEG): container finished" podID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerID="884d4cc8d7207cbda02636725d8904068a8d15754cec9b167e421e36500b25cd" exitCode=0 Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.426724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" event={"ID":"7f090a22-bd8a-4d9c-a477-c8b80664bbc5","Type":"ContainerDied","Data":"884d4cc8d7207cbda02636725d8904068a8d15754cec9b167e421e36500b25cd"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.428711 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"910b9022-54fc-4f7d-b69b-bdb7661cb91d","Type":"ContainerStarted","Data":"20a245738a3ce9bf04c5af13564381e9e68c5840419c912276ff90749b0754f4"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.430939 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lqctt" Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.430937 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"aabcbcd0-4cc0-495d-b059-6b8722c47aa1","Type":"ContainerStarted","Data":"f3528db4b7e5598bcc907fbbf9b6c8d23be76701f53aa9ac2091904cebbf6b6a"} Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.431004 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-s4l25" Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.435872 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.485845 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.545141 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.553031 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-s4l25"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.577371 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.582362 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lqctt"] Nov 26 22:40:24 crc kubenswrapper[4903]: W1126 22:40:24.587573 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddff09e4b_a38e_43fa_8394_e6922e356c4d.slice/crio-534823d7e51b5dd559d3ffe99e3cd1f5394cd6fe08d6502c87a7de69ef2a6944 WatchSource:0}: Error finding container 534823d7e51b5dd559d3ffe99e3cd1f5394cd6fe08d6502c87a7de69ef2a6944: Status 404 returned error can't find the container with id 534823d7e51b5dd559d3ffe99e3cd1f5394cd6fe08d6502c87a7de69ef2a6944 Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.590855 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.599939 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f98674c46-g2nz8"] Nov 26 22:40:24 crc kubenswrapper[4903]: W1126 22:40:24.604814 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d75ba9f_0873_4d65_b0c9_5347134bfcce.slice/crio-496810aef529bd448d7f87ef2958c6a3af232a1ccdd4b59491215798143b4e40 WatchSource:0}: Error finding container 496810aef529bd448d7f87ef2958c6a3af232a1ccdd4b59491215798143b4e40: Status 404 returned error can't find the container with id 496810aef529bd448d7f87ef2958c6a3af232a1ccdd4b59491215798143b4e40 Nov 26 22:40:24 crc kubenswrapper[4903]: I1126 22:40:24.874509 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 22:40:24 crc kubenswrapper[4903]: W1126 22:40:24.893271 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab9571fb_cb73_43ba_b0f3_fd1ef6b21a2e.slice/crio-de8a9bc8ff1fcce3746a8b5dc57a81cc5b962b9534a92de806805736c4818fbb WatchSource:0}: Error finding container de8a9bc8ff1fcce3746a8b5dc57a81cc5b962b9534a92de806805736c4818fbb: Status 404 returned error can't find the container with id de8a9bc8ff1fcce3746a8b5dc57a81cc5b962b9534a92de806805736c4818fbb Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.540333 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-mv2r4"] Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.577291 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" 
event={"ID":"dff09e4b-a38e-43fa-8394-e6922e356c4d","Type":"ContainerStarted","Data":"534823d7e51b5dd559d3ffe99e3cd1f5394cd6fe08d6502c87a7de69ef2a6944"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.582387 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kzb8j" event={"ID":"1aa29ea2-aaab-435e-9995-41a5f137be03","Type":"ContainerStarted","Data":"33a5da8ae669659a94c0fe1b02f63890afca61b1f6be416b1fa54820449533aa"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.608873 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a97b1b29-2461-47c7-a3f9-71837fe03413","Type":"ContainerStarted","Data":"04e34959aebfbba31367cd19f0fe0bba4187ce0a734a6ea30825e6142070d487"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.613847 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerStarted","Data":"811f0347a01b5853f4648e6ac01c00b8ed69289d5baa4432298544c62f314fd7"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.618427 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerStarted","Data":"496810aef529bd448d7f87ef2958c6a3af232a1ccdd4b59491215798143b4e40"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.629043 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f98674c46-g2nz8" event={"ID":"846f1e9e-ba63-4012-a677-d3732a751ada","Type":"ContainerStarted","Data":"c443662835366fed09f5cd2304b3e8e101706576862ca46f57564542e1ed6f97"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.629086 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f98674c46-g2nz8" event={"ID":"846f1e9e-ba63-4012-a677-d3732a751ada","Type":"ContainerStarted","Data":"2c639995c676b72ca8060b88442f5d15264bed2badd82fe7112ff39f967f9d89"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.649608 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e","Type":"ContainerStarted","Data":"de8a9bc8ff1fcce3746a8b5dc57a81cc5b962b9534a92de806805736c4818fbb"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.654944 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.664121 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" event={"ID":"0c6654bc-c2b5-45a0-93ab-c338beb90d3c","Type":"ContainerStarted","Data":"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.664920 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc"
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.669900 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7f98674c46-g2nz8" podStartSLOduration=12.669882961999999 podStartE2EDuration="12.669882962s" podCreationTimestamp="2025-11-26 22:40:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:40:25.664325445 +0000 UTC m=+1154.354560355" watchObservedRunningTime="2025-11-26 22:40:25.669882962 +0000 UTC m=+1154.360117872"
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.674845 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" event={"ID":"7f090a22-bd8a-4d9c-a477-c8b80664bbc5","Type":"ContainerStarted","Data":"5a6e9c975e0e0045f8d7f998c1116abed07d22f0c2b1d771cbed80a0419a37ac"}
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.675469 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2"
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.698121 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" podStartSLOduration=3.551303759 podStartE2EDuration="20.698107528s" podCreationTimestamp="2025-11-26 22:40:05 +0000 UTC" firstStartedPulling="2025-11-26 22:40:06.299141698 +0000 UTC m=+1134.989376608" lastFinishedPulling="2025-11-26 22:40:23.445945467 +0000 UTC m=+1152.136180377" observedRunningTime="2025-11-26 22:40:25.69251271 +0000 UTC m=+1154.382747620" watchObservedRunningTime="2025-11-26 22:40:25.698107528 +0000 UTC m=+1154.388342438"
Nov 26 22:40:25 crc kubenswrapper[4903]: I1126 22:40:25.714403 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" podStartSLOduration=3.422646907 podStartE2EDuration="20.714385599s" podCreationTimestamp="2025-11-26 22:40:05 +0000 UTC" firstStartedPulling="2025-11-26 22:40:06.078242137 +0000 UTC m=+1134.768477047" lastFinishedPulling="2025-11-26 22:40:23.369980819 +0000 UTC m=+1152.060215739" observedRunningTime="2025-11-26 22:40:25.7083636 +0000 UTC m=+1154.398598510" watchObservedRunningTime="2025-11-26 22:40:25.714385599 +0000 UTC m=+1154.404620509"
Nov 26 22:40:25 crc kubenswrapper[4903]: W1126 22:40:25.954357 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4286478b_1146_4f96_8819_753c3f6a6158.slice/crio-37430b947e25b9d3709c47f0a372837a86b39fde5f4dc6062138c0547c08e971 WatchSource:0}: Error finding container 37430b947e25b9d3709c47f0a372837a86b39fde5f4dc6062138c0547c08e971: Status 404 returned error can't find the container with id 37430b947e25b9d3709c47f0a372837a86b39fde5f4dc6062138c0547c08e971
Nov 26 22:40:25 crc kubenswrapper[4903]: W1126 22:40:25.959943 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfc848b9_8183_4fb5_b8ce_d9542294079f.slice/crio-1e62d2ba1884fabb5f85a4198e98a53c297e08b6e1757f76902ef22a2f7d1509 WatchSource:0}: Error finding container 1e62d2ba1884fabb5f85a4198e98a53c297e08b6e1757f76902ef22a2f7d1509: Status 404 returned error can't find the container with id 1e62d2ba1884fabb5f85a4198e98a53c297e08b6e1757f76902ef22a2f7d1509
Nov 26 22:40:26 crc kubenswrapper[4903]: I1126 22:40:26.043301 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d5d45d8-1f13-4a89-8c86-573963d436ec" path="/var/lib/kubelet/pods/0d5d45d8-1f13-4a89-8c86-573963d436ec/volumes"
Nov 26 22:40:26 crc kubenswrapper[4903]: I1126 22:40:26.043805 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5fa15be-adee-4af2-a008-61e95b025d3a" path="/var/lib/kubelet/pods/b5fa15be-adee-4af2-a008-61e95b025d3a/volumes"
Nov 26 22:40:26 crc kubenswrapper[4903]: I1126 22:40:26.685791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-mv2r4" event={"ID":"bfc848b9-8183-4fb5-b8ce-d9542294079f","Type":"ContainerStarted","Data":"1e62d2ba1884fabb5f85a4198e98a53c297e08b6e1757f76902ef22a2f7d1509"}
Nov 26 22:40:26 crc kubenswrapper[4903]: I1126 22:40:26.687882 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"910b9022-54fc-4f7d-b69b-bdb7661cb91d","Type":"ContainerStarted","Data":"4dd0882b808a2194123234623a000bbdf57499bf825ea99e6092344a44365cc1"}
Nov 26 22:40:26 crc kubenswrapper[4903]: I1126 22:40:26.690382 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4286478b-1146-4f96-8819-753c3f6a6158","Type":"ContainerStarted","Data":"37430b947e25b9d3709c47f0a372837a86b39fde5f4dc6062138c0547c08e971"}
Nov 26 22:40:30 crc kubenswrapper[4903]: I1126 22:40:30.383424 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2"
Nov 26 22:40:30 crc kubenswrapper[4903]: I1126 22:40:30.856032 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc"
Nov 26 22:40:30 crc kubenswrapper[4903]: I1126 22:40:30.927378 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"]
Nov 26 22:40:30 crc kubenswrapper[4903]: I1126 22:40:30.927726 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="dnsmasq-dns" containerID="cri-o://5a6e9c975e0e0045f8d7f998c1116abed07d22f0c2b1d771cbed80a0419a37ac" gracePeriod=10
Nov 26 22:40:31 crc kubenswrapper[4903]: I1126 22:40:31.765876 4903 generic.go:334] "Generic (PLEG): container finished" podID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerID="5a6e9c975e0e0045f8d7f998c1116abed07d22f0c2b1d771cbed80a0419a37ac" exitCode=0
Nov 26 22:40:31 crc kubenswrapper[4903]: I1126 22:40:31.765945 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" event={"ID":"7f090a22-bd8a-4d9c-a477-c8b80664bbc5","Type":"ContainerDied","Data":"5a6e9c975e0e0045f8d7f998c1116abed07d22f0c2b1d771cbed80a0419a37ac"}
Nov 26 22:40:33 crc kubenswrapper[4903]: I1126 22:40:33.410282 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7f98674c46-g2nz8"
Nov 26 22:40:33 crc kubenswrapper[4903]: I1126 22:40:33.410909 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7f98674c46-g2nz8"
Nov 26 22:40:33 crc kubenswrapper[4903]: I1126 22:40:33.416968 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7f98674c46-g2nz8"
Nov 26 22:40:33 crc kubenswrapper[4903]: I1126 22:40:33.788513 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7f98674c46-g2nz8"
Nov 26 22:40:33 crc kubenswrapper[4903]: I1126 22:40:33.857551 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"]
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.240942 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2"
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.358470 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config\") pod \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") "
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.358553 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psvf9\" (UniqueName: \"kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9\") pod \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") "
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.358625 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc\") pod \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\" (UID: \"7f090a22-bd8a-4d9c-a477-c8b80664bbc5\") "
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.368776 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9" (OuterVolumeSpecName: "kube-api-access-psvf9") pod "7f090a22-bd8a-4d9c-a477-c8b80664bbc5" (UID: "7f090a22-bd8a-4d9c-a477-c8b80664bbc5"). InnerVolumeSpecName "kube-api-access-psvf9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.411875 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config" (OuterVolumeSpecName: "config") pod "7f090a22-bd8a-4d9c-a477-c8b80664bbc5" (UID: "7f090a22-bd8a-4d9c-a477-c8b80664bbc5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.416757 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7f090a22-bd8a-4d9c-a477-c8b80664bbc5" (UID: "7f090a22-bd8a-4d9c-a477-c8b80664bbc5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.461023 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-config\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.461341 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psvf9\" (UniqueName: \"kubernetes.io/projected/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-kube-api-access-psvf9\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.461358 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f090a22-bd8a-4d9c-a477-c8b80664bbc5-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.794789 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2" event={"ID":"7f090a22-bd8a-4d9c-a477-c8b80664bbc5","Type":"ContainerDied","Data":"6c80308d4053bce66fc2ab5192d822e30656c64c44104858218a91dbf67c8aea"}
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.794843 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-jg7s2"
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.794857 4903 scope.go:117] "RemoveContainer" containerID="5a6e9c975e0e0045f8d7f998c1116abed07d22f0c2b1d771cbed80a0419a37ac"
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.835830 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"]
Nov 26 22:40:34 crc kubenswrapper[4903]: I1126 22:40:34.842035 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-jg7s2"]
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.039136 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" path="/var/lib/kubelet/pods/7f090a22-bd8a-4d9c-a477-c8b80664bbc5/volumes"
Nov 26 22:40:36 crc kubenswrapper[4903]: E1126 22:40:36.328278 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0"
Nov 26 22:40:36 crc kubenswrapper[4903]: E1126 22:40:36.328331 4903 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0"
Nov 26 22:40:36 crc kubenswrapper[4903]: E1126 22:40:36.328461 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k26s4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(f58d4082-e69c-44e2-9961-9842cb738869): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 26 22:40:36 crc kubenswrapper[4903]: E1126 22:40:36.329609 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.361358 4903 scope.go:117] "RemoveContainer" containerID="884d4cc8d7207cbda02636725d8904068a8d15754cec9b167e421e36500b25cd"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.827804 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"aabcbcd0-4cc0-495d-b059-6b8722c47aa1","Type":"ContainerStarted","Data":"aa17ae40fc00af2bcf6d3fa6a419efdab3df01fb8cd83b1a64b121015487462f"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.831717 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kzb8j" event={"ID":"1aa29ea2-aaab-435e-9995-41a5f137be03","Type":"ContainerStarted","Data":"d676463c4301a16b752e8236e71f268d23771581b953f5827c0556f2c4ff3329"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.832471 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-kzb8j"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.834561 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b1969a76-48dc-4a53-8ee9-f9b5a5670e30","Type":"ContainerStarted","Data":"3b5bf7b986b4dd08bef4914f1f14fde067b2677070c6b33acc6e2999d7b471aa"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.837069 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6fd08b11-1328-47a3-82a3-286d70df4394","Type":"ContainerStarted","Data":"3db208726eb46ecb28b74b98b60379b1324a431c7823717eece681d855c6c35a"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.837661 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.839429 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" event={"ID":"dff09e4b-a38e-43fa-8394-e6922e356c4d","Type":"ContainerStarted","Data":"848734ef8f27d2a0901c109dbfa3930add1cb64c51a43af7b03329b416f6144a"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.841396 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e","Type":"ContainerStarted","Data":"f8428c4368e5e18db0b05e3f667845090d4f7fbe8e8c4e6f8e60b47f8e319af4"}
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.849282 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4286478b-1146-4f96-8819-753c3f6a6158","Type":"ContainerStarted","Data":"0048aa3f8c3445d068454400d800ca83837b0bdd9697be196b75e352ce00c78e"}
Nov 26 22:40:36 crc kubenswrapper[4903]: E1126 22:40:36.850567 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.877913 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-kzb8j" podStartSLOduration=10.899178642 podStartE2EDuration="20.877875681s" podCreationTimestamp="2025-11-26 22:40:16 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.514765943 +0000 UTC m=+1153.205000873" lastFinishedPulling="2025-11-26 22:40:34.493463002 +0000 UTC m=+1163.183697912" observedRunningTime="2025-11-26 22:40:36.867959929 +0000 UTC m=+1165.558194829" watchObservedRunningTime="2025-11-26 22:40:36.877875681 +0000 UTC m=+1165.568110591"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.891095 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=18.625131121 podStartE2EDuration="27.89107593s" podCreationTimestamp="2025-11-26 22:40:09 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.034441831 +0000 UTC m=+1152.724676731" lastFinishedPulling="2025-11-26 22:40:33.30038663 +0000 UTC m=+1161.990621540" observedRunningTime="2025-11-26 22:40:36.881954199 +0000 UTC m=+1165.572189119" watchObservedRunningTime="2025-11-26 22:40:36.89107593 +0000 UTC m=+1165.581310840"
Nov 26 22:40:36 crc kubenswrapper[4903]: I1126 22:40:36.951526 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-ccgq6" podStartSLOduration=14.951771333 podStartE2EDuration="24.951511079s" podCreationTimestamp="2025-11-26 22:40:12 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.591042501 +0000 UTC m=+1153.281277411" lastFinishedPulling="2025-11-26 22:40:34.590782247 +0000 UTC m=+1163.281017157" observedRunningTime="2025-11-26 22:40:36.946404074 +0000 UTC m=+1165.636638984" watchObservedRunningTime="2025-11-26 22:40:36.951511079 +0000 UTC m=+1165.641745989"
Nov 26 22:40:37 crc kubenswrapper[4903]: I1126 22:40:37.860797 4903 generic.go:334] "Generic (PLEG): container finished" podID="bfc848b9-8183-4fb5-b8ce-d9542294079f" containerID="41164041bc2d6f4c6817f9e7d2702a73c6edea3ea200813f67f1ffa70b3aba2d" exitCode=0
Nov 26 22:40:37 crc kubenswrapper[4903]: I1126 22:40:37.860878 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-mv2r4" event={"ID":"bfc848b9-8183-4fb5-b8ce-d9542294079f","Type":"ContainerDied","Data":"41164041bc2d6f4c6817f9e7d2702a73c6edea3ea200813f67f1ffa70b3aba2d"}
Nov 26 22:40:38 crc kubenswrapper[4903]: I1126 22:40:38.876203 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-mv2r4" event={"ID":"bfc848b9-8183-4fb5-b8ce-d9542294079f","Type":"ContainerStarted","Data":"f2e532078493b33679aaff79b61262352bac15c3a39348bfc7a5b27ab259e9f1"}
Nov 26 22:40:39 crc kubenswrapper[4903]: I1126 22:40:39.888990 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerStarted","Data":"dac4b31325676ccfbae2e456949bd0270301da2bf2abc00388945a4b26c423e4"}
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.901760 4903 generic.go:334] "Generic (PLEG): container finished" podID="aabcbcd0-4cc0-495d-b059-6b8722c47aa1" containerID="aa17ae40fc00af2bcf6d3fa6a419efdab3df01fb8cd83b1a64b121015487462f" exitCode=0
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.901843 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"aabcbcd0-4cc0-495d-b059-6b8722c47aa1","Type":"ContainerDied","Data":"aa17ae40fc00af2bcf6d3fa6a419efdab3df01fb8cd83b1a64b121015487462f"}
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.905359 4903 generic.go:334] "Generic (PLEG): container finished" podID="b1969a76-48dc-4a53-8ee9-f9b5a5670e30" containerID="3b5bf7b986b4dd08bef4914f1f14fde067b2677070c6b33acc6e2999d7b471aa" exitCode=0
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.905457 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b1969a76-48dc-4a53-8ee9-f9b5a5670e30","Type":"ContainerDied","Data":"3b5bf7b986b4dd08bef4914f1f14fde067b2677070c6b33acc6e2999d7b471aa"}
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.909586 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e","Type":"ContainerStarted","Data":"ecce6b859760225bdf7a7fea29d019a6cbc169dba06035a6fce9b0d556c3260f"}
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.919122 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-mv2r4" event={"ID":"bfc848b9-8183-4fb5-b8ce-d9542294079f","Type":"ContainerStarted","Data":"6966ec10c821eca7971c0c2f6c24b226070535bb51a4c8f13c7d030a725db0cc"}
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.919288 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.919480 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-mv2r4"
Nov 26 22:40:40 crc kubenswrapper[4903]: I1126 22:40:40.921966 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4286478b-1146-4f96-8819-753c3f6a6158","Type":"ContainerStarted","Data":"5a7ffcd0e27e8ce887c68fe463ae049539a727509b1a3c672de7fb43dca996b8"}
Nov 26 22:40:41 crc kubenswrapper[4903]: I1126 22:40:41.027724 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-mv2r4" podStartSLOduration=16.502109653 podStartE2EDuration="25.027707852s" podCreationTimestamp="2025-11-26 22:40:16 +0000 UTC" firstStartedPulling="2025-11-26 22:40:25.967868393 +0000 UTC m=+1154.658103303" lastFinishedPulling="2025-11-26 22:40:34.493466592 +0000 UTC m=+1163.183701502" observedRunningTime="2025-11-26 22:40:41.025867473 +0000 UTC m=+1169.716102383" watchObservedRunningTime="2025-11-26 22:40:41.027707852 +0000 UTC m=+1169.717942762"
Nov 26 22:40:41 crc kubenswrapper[4903]: I1126 22:40:41.071737 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=8.556276502 podStartE2EDuration="24.071711669s" podCreationTimestamp="2025-11-26 22:40:17 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.89955254 +0000 UTC m=+1153.589787450" lastFinishedPulling="2025-11-26 22:40:40.414987697 +0000 UTC m=+1169.105222617" observedRunningTime="2025-11-26 22:40:41.062976165 +0000 UTC m=+1169.753211075" watchObservedRunningTime="2025-11-26 22:40:41.071711669 +0000 UTC m=+1169.761946619"
Nov 26 22:40:41 crc kubenswrapper[4903]: I1126 22:40:41.102641 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=9.63183701 podStartE2EDuration="24.102619046s" podCreationTimestamp="2025-11-26 22:40:17 +0000 UTC" firstStartedPulling="2025-11-26 22:40:25.965003797 +0000 UTC m=+1154.655238707" lastFinishedPulling="2025-11-26 22:40:40.435785803 +0000 UTC m=+1169.126020743" observedRunningTime="2025-11-26 22:40:41.093976334 +0000 UTC m=+1169.784211254" watchObservedRunningTime="2025-11-26 22:40:41.102619046 +0000 UTC m=+1169.792853986"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.141973 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.218717 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.326906 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.422024 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.969053 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b1969a76-48dc-4a53-8ee9-f9b5a5670e30","Type":"ContainerStarted","Data":"5ead79221a3cf85415d3eab28d56de945c1a7bdf5b6ac3569300324f23d7cfa5"}
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.971804 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"aabcbcd0-4cc0-495d-b059-6b8722c47aa1","Type":"ContainerStarted","Data":"7a5cc90b09fdaca470905ee7c5972b39dc8cba125c32f8855d3d7b536c3b5751"}
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.972315 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.972352 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 26 22:40:43 crc kubenswrapper[4903]: I1126 22:40:43.998333 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=26.463685957 podStartE2EDuration="36.998316788s" podCreationTimestamp="2025-11-26 22:40:07 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.057602534 +0000 UTC m=+1152.747837444" lastFinishedPulling="2025-11-26 22:40:34.592233365 +0000 UTC m=+1163.282468275" observedRunningTime="2025-11-26 22:40:43.988597768 +0000 UTC m=+1172.678832678" watchObservedRunningTime="2025-11-26 22:40:43.998316788 +0000 UTC m=+1172.688551698"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.031972 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=25.684682571 podStartE2EDuration="36.031943687s" podCreationTimestamp="2025-11-26 22:40:08 +0000 UTC" firstStartedPulling="2025-11-26 22:40:23.992337357 +0000 UTC m=+1152.682572267" lastFinishedPulling="2025-11-26 22:40:34.339598433 +0000 UTC m=+1163.029833383" observedRunningTime="2025-11-26 22:40:44.017441709 +0000 UTC m=+1172.707676649" watchObservedRunningTime="2025-11-26 22:40:44.031943687 +0000 UTC m=+1172.722178627"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.044872 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.044960 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.269235 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-f8nz2"]
Nov 26 22:40:44 crc kubenswrapper[4903]: E1126 22:40:44.269569 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="dnsmasq-dns"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.269581 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="dnsmasq-dns"
Nov 26 22:40:44 crc kubenswrapper[4903]: E1126 22:40:44.269609 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="init"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.269615 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="init"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.269807 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f090a22-bd8a-4d9c-a477-c8b80664bbc5" containerName="dnsmasq-dns"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.270730 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.272941 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.287073 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-f8nz2"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.386957 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.387005 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.387083 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.387132 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhzjz\" (UniqueName: \"kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.445851 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-f8nz2"]
Nov 26 22:40:44 crc kubenswrapper[4903]: E1126 22:40:44.446476 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-lhzjz ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2" podUID="9cd97ea2-042b-4730-8f01-76fdff497904"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.459917 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-ksn6g"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.461017 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.465310 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.487619 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488325 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488377 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-combined-ca-bundle\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488530 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488577 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovn-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488614 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488723 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488857 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhzjz\" (UniqueName: \"kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488916 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95087b9-4f77-4f65-b7bd-b799e673de6f-config\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.488952 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovs-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.489046 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jp47b\" (UniqueName: \"kubernetes.io/projected/d95087b9-4f77-4f65-b7bd-b799e673de6f-kube-api-access-jp47b\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.489452 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.489865 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.489904 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.489955 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.496157 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.516177 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.522376 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhzjz\" (UniqueName: \"kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz\") pod \"dnsmasq-dns-7fd796d7df-f8nz2\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") " pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.528897 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ksn6g"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.556683 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.562452 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.564039 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.564942 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.565133 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.565275 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-f2kt7"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.572411 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.593887 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.593945 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95087b9-4f77-4f65-b7bd-b799e673de6f-config\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.593972 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovs-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594013 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jp47b\" (UniqueName: \"kubernetes.io/projected/d95087b9-4f77-4f65-b7bd-b799e673de6f-kube-api-access-jp47b\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594038 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-scripts\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594053 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594099 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-combined-ca-bundle\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594130 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-config\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594152 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx4hv\" (UniqueName: \"kubernetes.io/projected/16d5105b-5e4e-4806-a873-a79e1aaccc68-kube-api-access-mx4hv\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594172 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594191 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4hns\" (UniqueName: \"kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594210 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovn-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594235 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594261 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594280 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594314 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594336 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594334 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovs-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.594673 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d95087b9-4f77-4f65-b7bd-b799e673de6f-ovn-rundir\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.595107 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95087b9-4f77-4f65-b7bd-b799e673de6f-config\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.600368 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-combined-ca-bundle\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.614636 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95087b9-4f77-4f65-b7bd-b799e673de6f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.615118 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jp47b\" (UniqueName: \"kubernetes.io/projected/d95087b9-4f77-4f65-b7bd-b799e673de6f-kube-api-access-jp47b\") pod \"ovn-controller-metrics-ksn6g\" (UID: \"d95087b9-4f77-4f65-b7bd-b799e673de6f\") " pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.695844 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx4hv\" (UniqueName: \"kubernetes.io/projected/16d5105b-5e4e-4806-a873-a79e1aaccc68-kube-api-access-mx4hv\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.695885 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.695908 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4hns\" (UniqueName: \"kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.695954 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.695984 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696006 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696042 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696068 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696156 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-scripts\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696173 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.696219 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-config\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.697037 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.697042 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-config\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.697986 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.698361 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.698574 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.698611 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16d5105b-5e4e-4806-a873-a79e1aaccc68-scripts\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.699004 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.699984 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.701186 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.707181 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/16d5105b-5e4e-4806-a873-a79e1aaccc68-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.714379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4hns\" (UniqueName: \"kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns\") pod \"dnsmasq-dns-86db49b7ff-f5z5h\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.714405 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx4hv\" (UniqueName: \"kubernetes.io/projected/16d5105b-5e4e-4806-a873-a79e1aaccc68-kube-api-access-mx4hv\") pod \"ovn-northd-0\" (UID: \"16d5105b-5e4e-4806-a873-a79e1aaccc68\") " pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.780054 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-ksn6g"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.814501 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.885914 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.986624 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:44 crc kubenswrapper[4903]: I1126 22:40:44.999308 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2"
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102245 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb\") pod \"9cd97ea2-042b-4730-8f01-76fdff497904\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") "
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102442 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc\") pod \"9cd97ea2-042b-4730-8f01-76fdff497904\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") "
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102491 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config\") pod \"9cd97ea2-042b-4730-8f01-76fdff497904\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") "
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102565 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhzjz\" (UniqueName: \"kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz\") pod \"9cd97ea2-042b-4730-8f01-76fdff497904\" (UID: \"9cd97ea2-042b-4730-8f01-76fdff497904\") "
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102790 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9cd97ea2-042b-4730-8f01-76fdff497904" (UID: "9cd97ea2-042b-4730-8f01-76fdff497904"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.102853 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config" (OuterVolumeSpecName: "config") pod "9cd97ea2-042b-4730-8f01-76fdff497904" (UID: "9cd97ea2-042b-4730-8f01-76fdff497904"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.103752 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-config\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.103775 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.104067 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9cd97ea2-042b-4730-8f01-76fdff497904" (UID: "9cd97ea2-042b-4730-8f01-76fdff497904"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.108909 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz" (OuterVolumeSpecName: "kube-api-access-lhzjz") pod "9cd97ea2-042b-4730-8f01-76fdff497904" (UID: "9cd97ea2-042b-4730-8f01-76fdff497904"). InnerVolumeSpecName "kube-api-access-lhzjz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.110250 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.204968 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd97ea2-042b-4730-8f01-76fdff497904-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.204997 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhzjz\" (UniqueName: \"kubernetes.io/projected/9cd97ea2-042b-4730-8f01-76fdff497904-kube-api-access-lhzjz\") on node \"crc\" DevicePath \"\""
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.250881 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-ksn6g"]
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.345822 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"]
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.494986 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 26 22:40:45 crc kubenswrapper[4903]: W1126 22:40:45.497299 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16d5105b_5e4e_4806_a873_a79e1aaccc68.slice/crio-c93caa0c2f4c934f85d7cb1cc7573bd963ee51fe8e5ff01b287fe8471f185254 WatchSource:0}: Error finding container c93caa0c2f4c934f85d7cb1cc7573bd963ee51fe8e5ff01b287fe8471f185254: Status 404 returned error can't find the container with id c93caa0c2f4c934f85d7cb1cc7573bd963ee51fe8e5ff01b287fe8471f185254
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.997903 4903 generic.go:334] "Generic (PLEG): container finished" podID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerID="491eca65525151d0bec441ff3ff53c2e17ebb0592b00e064996eb049bd50171a" exitCode=0
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.997952 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" event={"ID":"4fd4368b-8ca7-41fc-afa4-92566225f4f4","Type":"ContainerDied","Data":"491eca65525151d0bec441ff3ff53c2e17ebb0592b00e064996eb049bd50171a"}
Nov 26 22:40:45 crc kubenswrapper[4903]: I1126 22:40:45.998727 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" event={"ID":"4fd4368b-8ca7-41fc-afa4-92566225f4f4","Type":"ContainerStarted","Data":"6330f10522ac50a9e40d8f066434a422c6ff0f25a4b68f5b8c0ebca2ab48a8ec"}
Nov 26 22:40:46 crc kubenswrapper[4903]: I1126 22:40:45.999934 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"16d5105b-5e4e-4806-a873-a79e1aaccc68","Type":"ContainerStarted","Data":"c93caa0c2f4c934f85d7cb1cc7573bd963ee51fe8e5ff01b287fe8471f185254"}
Nov 26 22:40:46 crc kubenswrapper[4903]: I1126 22:40:46.001427 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openstack/ovn-controller-metrics-ksn6g" event={"ID":"d95087b9-4f77-4f65-b7bd-b799e673de6f","Type":"ContainerStarted","Data":"25998c935e936af9c86ef550329f253e46c624ed62aba423f8ac93d6760f76ba"} Nov 26 22:40:46 crc kubenswrapper[4903]: I1126 22:40:46.001454 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-ksn6g" event={"ID":"d95087b9-4f77-4f65-b7bd-b799e673de6f","Type":"ContainerStarted","Data":"24574f76297806880f178940db65408f298ecc4617bca72040e9ce9305fa509c"} Nov 26 22:40:46 crc kubenswrapper[4903]: I1126 22:40:46.002321 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2" Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.016905 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" event={"ID":"4fd4368b-8ca7-41fc-afa4-92566225f4f4","Type":"ContainerStarted","Data":"5717437173def688903ebfb9e5cc41421f602a5bc8691fb7256401776df600dc"} Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.020783 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.022606 4903 generic.go:334] "Generic (PLEG): container finished" podID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerID="dac4b31325676ccfbae2e456949bd0270301da2bf2abc00388945a4b26c423e4" exitCode=0 Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.023428 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerDied","Data":"dac4b31325676ccfbae2e456949bd0270301da2bf2abc00388945a4b26c423e4"} Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.037105 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-ksn6g" podStartSLOduration=3.037090656 podStartE2EDuration="3.037090656s" podCreationTimestamp="2025-11-26 22:40:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:40:46.047537008 +0000 UTC m=+1174.737771928" watchObservedRunningTime="2025-11-26 22:40:47.037090656 +0000 UTC m=+1175.727325566" Nov 26 22:40:47 crc kubenswrapper[4903]: I1126 22:40:47.045566 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" podStartSLOduration=3.045554212 podStartE2EDuration="3.045554212s" podCreationTimestamp="2025-11-26 22:40:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:40:47.033314504 +0000 UTC m=+1175.723549414" watchObservedRunningTime="2025-11-26 22:40:47.045554212 +0000 UTC m=+1175.735789122" Nov 26 22:40:48 crc kubenswrapper[4903]: I1126 22:40:48.052047 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"16d5105b-5e4e-4806-a873-a79e1aaccc68","Type":"ContainerStarted","Data":"341a764dbda0ae4d51b737d5a8c219de5718a980b655b82a6fea775710e46836"} Nov 26 22:40:48 crc kubenswrapper[4903]: I1126 22:40:48.052996 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"16d5105b-5e4e-4806-a873-a79e1aaccc68","Type":"ContainerStarted","Data":"f8bc2085abf3c3ba6a22d50f79e0297fdd8f42e9790d1e2da409c435d35c75fe"} Nov 26 22:40:48 crc kubenswrapper[4903]: I1126 
22:40:48.078843 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.568816756 podStartE2EDuration="4.078809809s" podCreationTimestamp="2025-11-26 22:40:44 +0000 UTC" firstStartedPulling="2025-11-26 22:40:45.499601437 +0000 UTC m=+1174.189836347" lastFinishedPulling="2025-11-26 22:40:47.00959447 +0000 UTC m=+1175.699829400" observedRunningTime="2025-11-26 22:40:48.070608649 +0000 UTC m=+1176.760843599" watchObservedRunningTime="2025-11-26 22:40:48.078809809 +0000 UTC m=+1176.769044749" Nov 26 22:40:48 crc kubenswrapper[4903]: I1126 22:40:48.498261 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 22:40:48 crc kubenswrapper[4903]: I1126 22:40:48.498343 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 22:40:49 crc kubenswrapper[4903]: I1126 22:40:49.050337 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.009503 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.009793 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.086424 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.154080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.769377 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 22:40:50 crc kubenswrapper[4903]: I1126 22:40:50.861424 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.251605 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-829mx"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.253060 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.268956 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-829mx"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.303415 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.303629 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="dnsmasq-dns" containerID="cri-o://5717437173def688903ebfb9e5cc41421f602a5bc8691fb7256401776df600dc" gracePeriod=10 Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.306131 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.361720 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szcxm\" (UniqueName: \"kubernetes.io/projected/84562f82-5408-4031-be94-2933a87dd5b0-kube-api-access-szcxm\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.361770 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84562f82-5408-4031-be94-2933a87dd5b0-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.396330 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.397956 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.415960 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464354 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464630 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szcxm\" (UniqueName: \"kubernetes.io/projected/84562f82-5408-4031-be94-2933a87dd5b0-kube-api-access-szcxm\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464661 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84562f82-5408-4031-be94-2933a87dd5b0-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464740 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464795 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464854 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.464881 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8cjv\" (UniqueName: \"kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.465936 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84562f82-5408-4031-be94-2933a87dd5b0-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc 
kubenswrapper[4903]: I1126 22:40:52.482113 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-df57-account-create-update-7g5m8"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.483498 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.485673 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.491511 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szcxm\" (UniqueName: \"kubernetes.io/projected/84562f82-5408-4031-be94-2933a87dd5b0-kube-api-access-szcxm\") pod \"mysqld-exporter-openstack-db-create-829mx\" (UID: \"84562f82-5408-4031-be94-2933a87dd5b0\") " pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.505152 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-df57-account-create-update-7g5m8"] Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566503 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566560 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8cjv\" (UniqueName: \"kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566604 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29ec5ffe-92c2-4f30-8477-913d14b49415-operator-scripts\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566668 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5426\" (UniqueName: \"kubernetes.io/projected/29ec5ffe-92c2-4f30-8477-913d14b49415-kube-api-access-z5426\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566739 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: 
\"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.566795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.567546 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.567567 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.567998 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.568313 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.583553 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8cjv\" (UniqueName: \"kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv\") pod \"dnsmasq-dns-698758b865-5vn9p\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.586027 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.669986 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29ec5ffe-92c2-4f30-8477-913d14b49415-operator-scripts\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.670043 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5426\" (UniqueName: \"kubernetes.io/projected/29ec5ffe-92c2-4f30-8477-913d14b49415-kube-api-access-z5426\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.670716 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29ec5ffe-92c2-4f30-8477-913d14b49415-operator-scripts\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.687974 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5426\" (UniqueName: \"kubernetes.io/projected/29ec5ffe-92c2-4f30-8477-913d14b49415-kube-api-access-z5426\") pod \"mysqld-exporter-df57-account-create-update-7g5m8\" (UID: \"29ec5ffe-92c2-4f30-8477-913d14b49415\") " pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.732576 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:40:52 crc kubenswrapper[4903]: I1126 22:40:52.844290 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.138375 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" event={"ID":"4fd4368b-8ca7-41fc-afa4-92566225f4f4","Type":"ContainerDied","Data":"5717437173def688903ebfb9e5cc41421f602a5bc8691fb7256401776df600dc"} Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.138559 4903 generic.go:334] "Generic (PLEG): container finished" podID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerID="5717437173def688903ebfb9e5cc41421f602a5bc8691fb7256401776df600dc" exitCode=0 Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.425296 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.431187 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.433446 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.433495 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-xssdp" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.433652 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.433720 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.442731 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.600134 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-cache\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.600460 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.600517 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.600544 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r78pf\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-kube-api-access-r78pf\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.600579 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-lock\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702118 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702186 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r78pf\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-kube-api-access-r78pf\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702219 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-lock\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702270 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-cache\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702346 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: E1126 22:40:53.702478 4903 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 22:40:53 crc kubenswrapper[4903]: E1126 22:40:53.702491 4903 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 22:40:53 crc kubenswrapper[4903]: E1126 22:40:53.702553 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift podName:1f34b822-e8fa-4f6d-b793-01d0e80ccb06 nodeName:}" failed. No retries permitted until 2025-11-26 22:40:54.202535963 +0000 UTC m=+1182.892770873 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift") pod "swift-storage-0" (UID: "1f34b822-e8fa-4f6d-b793-01d0e80ccb06") : configmap "swift-ring-files" not found Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.702995 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.703885 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-lock\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.704375 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-cache\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.740782 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r78pf\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-kube-api-access-r78pf\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.749924 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.824412 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-wp2ng"] Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.825637 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.827251 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.827740 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.832341 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 22:40:53 crc kubenswrapper[4903]: I1126 22:40:53.844165 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-wp2ng"] Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008433 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbgcc\" (UniqueName: \"kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008580 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008622 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008647 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008826 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008896 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " 
pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.008936 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111044 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111130 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111180 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111214 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111248 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111278 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbgcc\" (UniqueName: \"kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111540 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.111872 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.112319 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.114945 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.115123 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.119614 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.125624 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbgcc\" (UniqueName: \"kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc\") pod \"swift-ring-rebalance-wp2ng\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.143469 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.213009 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:54 crc kubenswrapper[4903]: E1126 22:40:54.213251 4903 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 22:40:54 crc kubenswrapper[4903]: E1126 22:40:54.213295 4903 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 22:40:54 crc kubenswrapper[4903]: E1126 22:40:54.213363 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift podName:1f34b822-e8fa-4f6d-b793-01d0e80ccb06 nodeName:}" failed. No retries permitted until 2025-11-26 22:40:55.21334147 +0000 UTC m=+1183.903576380 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift") pod "swift-storage-0" (UID: "1f34b822-e8fa-4f6d-b793-01d0e80ccb06") : configmap "swift-ring-files" not found Nov 26 22:40:54 crc kubenswrapper[4903]: I1126 22:40:54.815673 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.144:5353: connect: connection refused" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.235967 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:55 crc kubenswrapper[4903]: E1126 22:40:55.236147 4903 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 22:40:55 crc kubenswrapper[4903]: E1126 22:40:55.236171 4903 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 22:40:55 crc kubenswrapper[4903]: E1126 22:40:55.236224 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift podName:1f34b822-e8fa-4f6d-b793-01d0e80ccb06 nodeName:}" failed. No retries permitted until 2025-11-26 22:40:57.23620806 +0000 UTC m=+1185.926442970 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift") pod "swift-storage-0" (UID: "1f34b822-e8fa-4f6d-b793-01d0e80ccb06") : configmap "swift-ring-files" not found Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.286308 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-zcdbt"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.288415 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.294729 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zcdbt"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.392684 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-d34e-account-create-update-2z8vz"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.394918 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.400739 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.422820 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d34e-account-create-update-2z8vz"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.439916 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f687978-0ef1-4061-98ed-a8684824ece8-operator-scripts\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.440024 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s465z\" (UniqueName: \"kubernetes.io/projected/4f687978-0ef1-4061-98ed-a8684824ece8-kube-api-access-s465z\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.477785 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.541847 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s465z\" (UniqueName: \"kubernetes.io/projected/4f687978-0ef1-4061-98ed-a8684824ece8-kube-api-access-s465z\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.541908 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.541969 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tml9q\" (UniqueName: \"kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.542040 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f687978-0ef1-4061-98ed-a8684824ece8-operator-scripts\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.542710 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f687978-0ef1-4061-98ed-a8684824ece8-operator-scripts\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.575531 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-s465z\" (UniqueName: \"kubernetes.io/projected/4f687978-0ef1-4061-98ed-a8684824ece8-kube-api-access-s465z\") pod \"glance-db-create-zcdbt\" (UID: \"4f687978-0ef1-4061-98ed-a8684824ece8\") " pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.643818 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4hns\" (UniqueName: \"kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns\") pod \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.643865 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb\") pod \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.644053 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb\") pod \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.644094 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config\") pod \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.644110 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc\") pod \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\" (UID: \"4fd4368b-8ca7-41fc-afa4-92566225f4f4\") " Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.644427 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.644489 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tml9q\" (UniqueName: \"kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.650041 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.652818 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns" (OuterVolumeSpecName: "kube-api-access-h4hns") pod 
"4fd4368b-8ca7-41fc-afa4-92566225f4f4" (UID: "4fd4368b-8ca7-41fc-afa4-92566225f4f4"). InnerVolumeSpecName "kube-api-access-h4hns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.666418 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tml9q\" (UniqueName: \"kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q\") pod \"glance-d34e-account-create-update-2z8vz\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.716426 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-zcdbt" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.751901 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4hns\" (UniqueName: \"kubernetes.io/projected/4fd4368b-8ca7-41fc-afa4-92566225f4f4-kube-api-access-h4hns\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.776841 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.789341 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config" (OuterVolumeSpecName: "config") pod "4fd4368b-8ca7-41fc-afa4-92566225f4f4" (UID: "4fd4368b-8ca7-41fc-afa4-92566225f4f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.814326 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fd4368b-8ca7-41fc-afa4-92566225f4f4" (UID: "4fd4368b-8ca7-41fc-afa4-92566225f4f4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.815431 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fd4368b-8ca7-41fc-afa4-92566225f4f4" (UID: "4fd4368b-8ca7-41fc-afa4-92566225f4f4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.816037 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fd4368b-8ca7-41fc-afa4-92566225f4f4" (UID: "4fd4368b-8ca7-41fc-afa4-92566225f4f4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.857882 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.857922 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.857932 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.857944 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd4368b-8ca7-41fc-afa4-92566225f4f4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.883966 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.898145 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-829mx"] Nov 26 22:40:55 crc kubenswrapper[4903]: I1126 22:40:55.918978 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-df57-account-create-update-7g5m8"] Nov 26 22:40:55 crc kubenswrapper[4903]: W1126 22:40:55.930314 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29ec5ffe_92c2_4f30_8477_913d14b49415.slice/crio-9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a WatchSource:0}: Error finding container 9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a: Status 404 returned error can't find the container with id 9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.132994 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-wp2ng"] Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.177749 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-wp2ng" event={"ID":"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc","Type":"ContainerStarted","Data":"86744a073028475bd70b98028922f20bf2964d15937ef1622958cad3ba1e3e07"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.179811 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerStarted","Data":"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.180867 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.181517 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerStarted","Data":"a6a19313ea9c260a0231b410b7ade60ed9788d7161b5121cf0204087d4d1df8d"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.182226 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" event={"ID":"29ec5ffe-92c2-4f30-8477-913d14b49415","Type":"ContainerStarted","Data":"9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.183651 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" event={"ID":"4fd4368b-8ca7-41fc-afa4-92566225f4f4","Type":"ContainerDied","Data":"6330f10522ac50a9e40d8f066434a422c6ff0f25a4b68f5b8c0ebca2ab48a8ec"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.183679 4903 scope.go:117] "RemoveContainer" containerID="5717437173def688903ebfb9e5cc41421f602a5bc8691fb7256401776df600dc" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.183825 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-f5z5h" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.192518 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-5vn9p" event={"ID":"96fe1cef-e83b-48c2-8731-f0c74f19ce91","Type":"ContainerStarted","Data":"42e0cf19c1b6f510e2be0ec6c8fa79a1337725df2eee393aec718ae7dcf1718b"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.203114 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-829mx" event={"ID":"84562f82-5408-4031-be94-2933a87dd5b0","Type":"ContainerStarted","Data":"6ac700e941269c1124b9947be2da1a22d2f4a7cf2f140310bc84e7ae6940291c"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.203155 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-829mx" event={"ID":"84562f82-5408-4031-be94-2933a87dd5b0","Type":"ContainerStarted","Data":"f724686a4d5a464b4124fe6ec12113b3ec01b631a9d703f1716e2f9888ff0ee8"} Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.208957 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=13.504335833 podStartE2EDuration="44.208928456s" podCreationTimestamp="2025-11-26 22:40:12 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.603046298 +0000 UTC m=+1153.293281208" lastFinishedPulling="2025-11-26 22:40:55.307638921 +0000 UTC m=+1183.997873831" observedRunningTime="2025-11-26 22:40:56.192968469 +0000 UTC m=+1184.883203379" watchObservedRunningTime="2025-11-26 22:40:56.208928456 +0000 UTC m=+1184.899163376" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.224328 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-db-create-829mx" podStartSLOduration=4.224314448 podStartE2EDuration="4.224314448s" podCreationTimestamp="2025-11-26 22:40:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:40:56.221138673 +0000 UTC m=+1184.911373583" watchObservedRunningTime="2025-11-26 22:40:56.224314448 +0000 UTC m=+1184.914549358" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.242655 4903 scope.go:117] "RemoveContainer" containerID="491eca65525151d0bec441ff3ff53c2e17ebb0592b00e064996eb049bd50171a" Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.248282 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"] Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.275916 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-86db49b7ff-f5z5h"] Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.277088 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-zcdbt"] Nov 26 22:40:56 crc kubenswrapper[4903]: W1126 22:40:56.314955 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f687978_0ef1_4061_98ed_a8684824ece8.slice/crio-5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2 WatchSource:0}: Error finding container 5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2: Status 404 returned error can't find the container with id 5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2 Nov 26 22:40:56 crc kubenswrapper[4903]: I1126 22:40:56.411309 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d34e-account-create-update-2z8vz"] Nov 26 22:40:56 crc kubenswrapper[4903]: W1126 22:40:56.420972 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1a6da44_50e3_4b0d_8062_aafa8b65aaaf.slice/crio-76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3 WatchSource:0}: Error finding container 76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3: Status 404 returned error can't find the container with id 76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3 Nov 26 22:40:56 crc kubenswrapper[4903]: E1126 22:40:56.922426 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f687978_0ef1_4061_98ed_a8684824ece8.slice/crio-conmon-d1867e44d3619f4f752d2bb02a07c0a0fd935e0df9c9fffa693e4e6c2fce3903.scope\": RecentStats: unable to find data in memory cache]" Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.226203 4903 generic.go:334] "Generic (PLEG): container finished" podID="b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" containerID="7caf44671443d54e66db2aff9ab1f003babdf73860d4f8082d9d6c3b88ef1fef" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.226298 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d34e-account-create-update-2z8vz" event={"ID":"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf","Type":"ContainerDied","Data":"7caf44671443d54e66db2aff9ab1f003babdf73860d4f8082d9d6c3b88ef1fef"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.226343 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d34e-account-create-update-2z8vz" event={"ID":"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf","Type":"ContainerStarted","Data":"76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.232259 4903 generic.go:334] "Generic (PLEG): container finished" podID="4f687978-0ef1-4061-98ed-a8684824ece8" containerID="d1867e44d3619f4f752d2bb02a07c0a0fd935e0df9c9fffa693e4e6c2fce3903" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.232324 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zcdbt" event={"ID":"4f687978-0ef1-4061-98ed-a8684824ece8","Type":"ContainerDied","Data":"d1867e44d3619f4f752d2bb02a07c0a0fd935e0df9c9fffa693e4e6c2fce3903"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.232348 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zcdbt" 
event={"ID":"4f687978-0ef1-4061-98ed-a8684824ece8","Type":"ContainerStarted","Data":"5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.233738 4903 generic.go:334] "Generic (PLEG): container finished" podID="29ec5ffe-92c2-4f30-8477-913d14b49415" containerID="7dc4941a306483ee82b589f5308302beff0c8df2896d9f429220f03853ced321" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.233827 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" event={"ID":"29ec5ffe-92c2-4f30-8477-913d14b49415","Type":"ContainerDied","Data":"7dc4941a306483ee82b589f5308302beff0c8df2896d9f429220f03853ced321"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.239169 4903 generic.go:334] "Generic (PLEG): container finished" podID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerID="f0b9d8b32473b2967c9f1661d8597d03dd65fb4934bff21ffaf523d317aa5871" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.239242 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-5vn9p" event={"ID":"96fe1cef-e83b-48c2-8731-f0c74f19ce91","Type":"ContainerDied","Data":"f0b9d8b32473b2967c9f1661d8597d03dd65fb4934bff21ffaf523d317aa5871"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.242285 4903 generic.go:334] "Generic (PLEG): container finished" podID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerID="04e34959aebfbba31367cd19f0fe0bba4187ce0a734a6ea30825e6142070d487" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.242340 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a97b1b29-2461-47c7-a3f9-71837fe03413","Type":"ContainerDied","Data":"04e34959aebfbba31367cd19f0fe0bba4187ce0a734a6ea30825e6142070d487"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.246856 4903 generic.go:334] "Generic (PLEG): container finished" podID="84562f82-5408-4031-be94-2933a87dd5b0" containerID="6ac700e941269c1124b9947be2da1a22d2f4a7cf2f140310bc84e7ae6940291c" exitCode=0 Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.247569 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-829mx" event={"ID":"84562f82-5408-4031-be94-2933a87dd5b0","Type":"ContainerDied","Data":"6ac700e941269c1124b9947be2da1a22d2f4a7cf2f140310bc84e7ae6940291c"} Nov 26 22:40:57 crc kubenswrapper[4903]: I1126 22:40:57.285391 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:40:57 crc kubenswrapper[4903]: E1126 22:40:57.285617 4903 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 22:40:57 crc kubenswrapper[4903]: E1126 22:40:57.285672 4903 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 22:40:57 crc kubenswrapper[4903]: E1126 22:40:57.285760 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift podName:1f34b822-e8fa-4f6d-b793-01d0e80ccb06 nodeName:}" failed. 
No retries permitted until 2025-11-26 22:41:01.285739509 +0000 UTC m=+1189.975974419 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift") pod "swift-storage-0" (UID: "1f34b822-e8fa-4f6d-b793-01d0e80ccb06") : configmap "swift-ring-files" not found
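---- Editor's note (not part of the captured log) ----
The nestedpendingoperations record above shows the volume manager backing off before retrying the failed MountVolume.SetUp: the mount cannot proceed because the swift-ring-files ConfigMap does not exist yet, so the retry is scheduled 4s out. When the same mount fails again at 22:41:01 (further below), the delay doubles to 8s. A small Go sketch of doubling-with-cap backoff consistent with those two data points; the initial delay and cap are assumptions for illustration, not values taken from this log or from kubelet source.

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the previous delay and clamps it at maxDelay, matching
// the observed 4s -> 8s progression of durationBeforeRetry for "etc-swift".
func nextBackoff(last time.Duration) time.Duration {
	const initial = 500 * time.Millisecond // assumed first-failure delay
	const maxDelay = 2 * time.Minute       // assumed cap
	if last <= 0 {
		return initial
	}
	if next := 2 * last; next < maxDelay {
		return next
	}
	return maxDelay
}

func main() {
	fmt.Println(nextBackoff(4 * time.Second)) // 8s, as seen on the next retry
}
---- end note ----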
containerID="017852a728600949ccb133ee9540cfa26374139c03a9d09f828ca85584962622" exitCode=2 Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.286577 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d87bcd8c7-tgv7l" event={"ID":"3ef46889-0257-408b-8f72-b8985eacb494","Type":"ContainerDied","Data":"017852a728600949ccb133ee9540cfa26374139c03a9d09f828ca85584962622"} Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.751855 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-5vn9p" podStartSLOduration=7.751825124 podStartE2EDuration="7.751825124s" podCreationTimestamp="2025-11-26 22:40:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:40:58.35015718 +0000 UTC m=+1187.040392080" watchObservedRunningTime="2025-11-26 22:40:59.751825124 +0000 UTC m=+1188.442060074" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.754658 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-pd66t"] Nov 26 22:40:59 crc kubenswrapper[4903]: E1126 22:40:59.755377 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="init" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.755410 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="init" Nov 26 22:40:59 crc kubenswrapper[4903]: E1126 22:40:59.755463 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="dnsmasq-dns" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.755478 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="dnsmasq-dns" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.755943 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd4368b-8ca7-41fc-afa4-92566225f4f4" containerName="dnsmasq-dns" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.757235 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.776956 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pd66t"] Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.836159 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-19c9-account-create-update-fkslg"] Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.837776 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.847187 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.847303 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.847254 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2rhk\" (UniqueName: \"kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.848288 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-19c9-account-create-update-fkslg"] Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.945080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.948807 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flzx2\" (UniqueName: \"kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.948862 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.948888 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2rhk\" (UniqueName: \"kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.948933 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.949540 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:40:59 crc kubenswrapper[4903]: I1126 22:40:59.970333 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z2rhk\" (UniqueName: \"kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk\") pod \"keystone-db-create-pd66t\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") " pod="openstack/keystone-db-create-pd66t" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.050974 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flzx2\" (UniqueName: \"kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.051100 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.051883 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.066351 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flzx2\" (UniqueName: \"kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2\") pod \"keystone-19c9-account-create-update-fkslg\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") " pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.092580 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pd66t" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.124560 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-w8mjj"] Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.125869 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.133305 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-w8mjj"] Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.150235 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-78f3-account-create-update-rtnpt"] Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.151761 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.161259 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.181066 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-78f3-account-create-update-rtnpt"] Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.185816 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.254209 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.254326 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.254391 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfjds\" (UniqueName: \"kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.254488 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqsv8\" (UniqueName: \"kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.298055 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerStarted","Data":"dfd9e5a4795c099ca8187b350c89fd8ca7cb66598f49cca978ceb1d28588cb7e"} Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.355649 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfjds\" (UniqueName: \"kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.355780 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqsv8\" (UniqueName: \"kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.355831 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.355912 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.356630 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.356784 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.380770 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqsv8\" (UniqueName: \"kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8\") pod \"placement-db-create-w8mjj\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") " pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.396246 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfjds\" (UniqueName: \"kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds\") pod \"placement-78f3-account-create-update-rtnpt\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") " pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.442311 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:00 crc kubenswrapper[4903]: I1126 22:41:00.482177 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.379904 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:41:01 crc kubenswrapper[4903]: E1126 22:41:01.380155 4903 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 22:41:01 crc kubenswrapper[4903]: E1126 22:41:01.380175 4903 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 22:41:01 crc kubenswrapper[4903]: E1126 22:41:01.380227 4903 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift podName:1f34b822-e8fa-4f6d-b793-01d0e80ccb06 nodeName:}" failed. No retries permitted until 2025-11-26 22:41:09.380210565 +0000 UTC m=+1198.070445485 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift") pod "swift-storage-0" (UID: "1f34b822-e8fa-4f6d-b793-01d0e80ccb06") : configmap "swift-ring-files" not found
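---- Editor's note (not part of the captured log) ----
This is the retry of the 22:40:57 "etc-swift" failure above, and durationBeforeRetry has doubled from 4s to 8s, consistent with the backoff sketch after the first failure. The mount presumably keeps failing until the swift-ring-files ConfigMap is published; the swift-ring-rebalance-wp2ng job that would produce it is running in parallel in this log.
---- end note ----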
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.792988 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f687978-0ef1-4061-98ed-a8684824ece8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4f687978-0ef1-4061-98ed-a8684824ece8" (UID: "4f687978-0ef1-4061-98ed-a8684824ece8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.793095 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29ec5ffe-92c2-4f30-8477-913d14b49415-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29ec5ffe-92c2-4f30-8477-913d14b49415" (UID: "29ec5ffe-92c2-4f30-8477-913d14b49415"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.800786 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29ec5ffe-92c2-4f30-8477-913d14b49415-kube-api-access-z5426" (OuterVolumeSpecName: "kube-api-access-z5426") pod "29ec5ffe-92c2-4f30-8477-913d14b49415" (UID: "29ec5ffe-92c2-4f30-8477-913d14b49415"). InnerVolumeSpecName "kube-api-access-z5426". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.803116 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.816330 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f687978-0ef1-4061-98ed-a8684824ece8-kube-api-access-s465z" (OuterVolumeSpecName: "kube-api-access-s465z") pod "4f687978-0ef1-4061-98ed-a8684824ece8" (UID: "4f687978-0ef1-4061-98ed-a8684824ece8"). InnerVolumeSpecName "kube-api-access-s465z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.817746 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84562f82-5408-4031-be94-2933a87dd5b0-kube-api-access-szcxm" (OuterVolumeSpecName: "kube-api-access-szcxm") pod "84562f82-5408-4031-be94-2933a87dd5b0" (UID: "84562f82-5408-4031-be94-2933a87dd5b0"). InnerVolumeSpecName "kube-api-access-szcxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.860394 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-d87bcd8c7-tgv7l_3ef46889-0257-408b-8f72-b8985eacb494/console/0.log" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.860484 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894418 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtr45\" (UniqueName: \"kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894481 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tml9q\" (UniqueName: \"kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q\") pod \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894578 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894714 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894810 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894863 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894896 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894928 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca\") pod \"3ef46889-0257-408b-8f72-b8985eacb494\" (UID: \"3ef46889-0257-408b-8f72-b8985eacb494\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.894988 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts\") pod \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\" (UID: \"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf\") " Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895586 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5426\" (UniqueName: 
\"kubernetes.io/projected/29ec5ffe-92c2-4f30-8477-913d14b49415-kube-api-access-z5426\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895610 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szcxm\" (UniqueName: \"kubernetes.io/projected/84562f82-5408-4031-be94-2933a87dd5b0-kube-api-access-szcxm\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895623 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s465z\" (UniqueName: \"kubernetes.io/projected/4f687978-0ef1-4061-98ed-a8684824ece8-kube-api-access-s465z\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895634 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/84562f82-5408-4031-be94-2933a87dd5b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895644 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29ec5ffe-92c2-4f30-8477-913d14b49415-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.895657 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f687978-0ef1-4061-98ed-a8684824ece8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.897403 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config" (OuterVolumeSpecName: "console-config") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.897903 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.898362 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.898928 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" (UID: "b1a6da44-50e3-4b0d-8062-aafa8b65aaaf"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.904637 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca" (OuterVolumeSpecName: "service-ca") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.911536 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q" (OuterVolumeSpecName: "kube-api-access-tml9q") pod "b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" (UID: "b1a6da44-50e3-4b0d-8062-aafa8b65aaaf"). InnerVolumeSpecName "kube-api-access-tml9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.924084 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.924288 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45" (OuterVolumeSpecName: "kube-api-access-rtr45") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "kube-api-access-rtr45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.925285 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "3ef46889-0257-408b-8f72-b8985eacb494" (UID: "3ef46889-0257-408b-8f72-b8985eacb494"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998261 4903 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998306 4903 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ef46889-0257-408b-8f72-b8985eacb494-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998318 4903 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998329 4903 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998341 4903 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998351 4903 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3ef46889-0257-408b-8f72-b8985eacb494-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998367 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998379 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtr45\" (UniqueName: \"kubernetes.io/projected/3ef46889-0257-408b-8f72-b8985eacb494-kube-api-access-rtr45\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:01 crc kubenswrapper[4903]: I1126 22:41:01.998394 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tml9q\" (UniqueName: \"kubernetes.io/projected/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf-kube-api-access-tml9q\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.144756 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-19c9-account-create-update-fkslg"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.226774 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pd66t"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.242875 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-78f3-account-create-update-rtnpt"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.330899 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-wp2ng" event={"ID":"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc","Type":"ContainerStarted","Data":"f8129e80533f25e778d006f1a3b9cbb3e6ad4bab25565c3d58d92b2ddf019c04"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.339251 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pd66t" 
event={"ID":"5fcf8863-3ab3-4617-9d3e-41256bf1c3de","Type":"ContainerStarted","Data":"ddcc8a007b5a6bccba404bd44c16370475177c48d6b8b92a09ca45fcba73fde3"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.341237 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19c9-account-create-update-fkslg" event={"ID":"7208d6af-3e83-4569-b654-20c73205bab5","Type":"ContainerStarted","Data":"72edf5984fe7e080343e5df752f8e71ab00dd55d4bbbb6b52144f5a43c63615c"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.341268 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19c9-account-create-update-fkslg" event={"ID":"7208d6af-3e83-4569-b654-20c73205bab5","Type":"ContainerStarted","Data":"b47423c73b8dab9c48cfc5c243371056acd9ef8933c18bc0c7ab67261270db1d"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.348273 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"910b9022-54fc-4f7d-b69b-bdb7661cb91d","Type":"ContainerStarted","Data":"0584530df49ff20fab14cd674291135a2d591f6fd7ce117d13a3ca007cce8f5d"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.348534 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.356984 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-wp2ng" podStartSLOduration=3.810393279 podStartE2EDuration="9.356963269s" podCreationTimestamp="2025-11-26 22:40:53 +0000 UTC" firstStartedPulling="2025-11-26 22:40:56.123511441 +0000 UTC m=+1184.813746361" lastFinishedPulling="2025-11-26 22:41:01.670081441 +0000 UTC m=+1190.360316351" observedRunningTime="2025-11-26 22:41:02.345962136 +0000 UTC m=+1191.036197046" watchObservedRunningTime="2025-11-26 22:41:02.356963269 +0000 UTC m=+1191.047198179" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.361117 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-d87bcd8c7-tgv7l_3ef46889-0257-408b-8f72-b8985eacb494/console/0.log" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.361327 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-d87bcd8c7-tgv7l" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.361763 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d87bcd8c7-tgv7l" event={"ID":"3ef46889-0257-408b-8f72-b8985eacb494","Type":"ContainerDied","Data":"502c3d7b509a8c583657b0268116a466c703672f8e8e8f34b3e30d5eef939fd4"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.361794 4903 scope.go:117] "RemoveContainer" containerID="017852a728600949ccb133ee9540cfa26374139c03a9d09f828ca85584962622" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.362123 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-19c9-account-create-update-fkslg" podStartSLOduration=3.362107957 podStartE2EDuration="3.362107957s" podCreationTimestamp="2025-11-26 22:40:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:02.357486214 +0000 UTC m=+1191.047721124" watchObservedRunningTime="2025-11-26 22:41:02.362107957 +0000 UTC m=+1191.052342867" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.370249 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-829mx" event={"ID":"84562f82-5408-4031-be94-2933a87dd5b0","Type":"ContainerDied","Data":"f724686a4d5a464b4124fe6ec12113b3ec01b631a9d703f1716e2f9888ff0ee8"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.370280 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f724686a4d5a464b4124fe6ec12113b3ec01b631a9d703f1716e2f9888ff0ee8" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.370337 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-829mx" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.372423 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78f3-account-create-update-rtnpt" event={"ID":"bcf866d6-b692-4e5c-bdc0-cf6569ecd016","Type":"ContainerStarted","Data":"b0cd632055a00f00e619dca54bbcb356e8377d81ab92bc03a70d103289e86ecc"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.374144 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-zcdbt" event={"ID":"4f687978-0ef1-4061-98ed-a8684824ece8","Type":"ContainerDied","Data":"5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.374237 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5340cbd1510aa0fa4e138308f0af7f7138f32eeb31317fc921d9229847a997b2" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.374330 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-zcdbt" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.376053 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d34e-account-create-update-2z8vz" event={"ID":"b1a6da44-50e3-4b0d-8062-aafa8b65aaaf","Type":"ContainerDied","Data":"76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.376210 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76cbae9c9efce3e3a0e4d6ced21bfbedf8455c6a05ae9f043f0f36c970c228b3" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.376195 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d34e-account-create-update-2z8vz" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.383639 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" event={"ID":"29ec5ffe-92c2-4f30-8477-913d14b49415","Type":"ContainerDied","Data":"9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a"} Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.383680 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c585224804b306e75aaacd68dfc3282db5207c602d656e12610310f62e29b9a" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.383767 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-df57-account-create-update-7g5m8" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.392856 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=57.392836189 podStartE2EDuration="57.392836189s" podCreationTimestamp="2025-11-26 22:40:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:02.383165331 +0000 UTC m=+1191.073400241" watchObservedRunningTime="2025-11-26 22:41:02.392836189 +0000 UTC m=+1191.083071089" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.407265 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.419580 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-d87bcd8c7-tgv7l"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.437886 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-w8mjj"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.458166 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.740299 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.811928 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"] Nov 26 22:41:02 crc kubenswrapper[4903]: I1126 22:41:02.812249 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="dnsmasq-dns" containerID="cri-o://ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c" gracePeriod=10 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 
22:41:03.400943 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.406823 4903 generic.go:334] "Generic (PLEG): container finished" podID="bcf866d6-b692-4e5c-bdc0-cf6569ecd016" containerID="1cf6b0a259ad9abc4cf723013d2f0777e47205e3bcc59ff92232da8e141ec567" exitCode=0 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.406886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78f3-account-create-update-rtnpt" event={"ID":"bcf866d6-b692-4e5c-bdc0-cf6569ecd016","Type":"ContainerDied","Data":"1cf6b0a259ad9abc4cf723013d2f0777e47205e3bcc59ff92232da8e141ec567"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.409678 4903 generic.go:334] "Generic (PLEG): container finished" podID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerID="ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c" exitCode=0 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.409751 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" event={"ID":"0c6654bc-c2b5-45a0-93ab-c338beb90d3c","Type":"ContainerDied","Data":"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.409770 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" event={"ID":"0c6654bc-c2b5-45a0-93ab-c338beb90d3c","Type":"ContainerDied","Data":"4bc2a786659e06bac65141c0e9320203f67e6daa8bbfe28433289e7e76393487"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.409791 4903 scope.go:117] "RemoveContainer" containerID="ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.409939 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-b82qc" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.414986 4903 generic.go:334] "Generic (PLEG): container finished" podID="f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" containerID="81ff826347c0a76aa20e2da23dd58e1ec3ea3df1605e8184063e786d61c5a714" exitCode=0 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.415117 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-w8mjj" event={"ID":"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216","Type":"ContainerDied","Data":"81ff826347c0a76aa20e2da23dd58e1ec3ea3df1605e8184063e786d61c5a714"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.415168 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-w8mjj" event={"ID":"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216","Type":"ContainerStarted","Data":"e9d70dfb24098eec5a1a9245df79de5f9f2cbb6d36015eac54fe7aaf6e4efe6a"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.490160 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-898g2\" (UniqueName: \"kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2\") pod \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.491183 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc\") pod \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.491205 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config\") pod \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\" (UID: \"0c6654bc-c2b5-45a0-93ab-c338beb90d3c\") " Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.496298 4903 generic.go:334] "Generic (PLEG): container finished" podID="5fcf8863-3ab3-4617-9d3e-41256bf1c3de" containerID="00986bc0d77af6ad74e6112ba9111364d0b84ad7b86d1e2512afc5825e888bdc" exitCode=0 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.496491 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pd66t" event={"ID":"5fcf8863-3ab3-4617-9d3e-41256bf1c3de","Type":"ContainerDied","Data":"00986bc0d77af6ad74e6112ba9111364d0b84ad7b86d1e2512afc5825e888bdc"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.504085 4903 generic.go:334] "Generic (PLEG): container finished" podID="7208d6af-3e83-4569-b654-20c73205bab5" containerID="72edf5984fe7e080343e5df752f8e71ab00dd55d4bbbb6b52144f5a43c63615c" exitCode=0 Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.504146 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19c9-account-create-update-fkslg" event={"ID":"7208d6af-3e83-4569-b654-20c73205bab5","Type":"ContainerDied","Data":"72edf5984fe7e080343e5df752f8e71ab00dd55d4bbbb6b52144f5a43c63615c"} Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.546925 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2" (OuterVolumeSpecName: "kube-api-access-898g2") pod "0c6654bc-c2b5-45a0-93ab-c338beb90d3c" (UID: "0c6654bc-c2b5-45a0-93ab-c338beb90d3c"). InnerVolumeSpecName "kube-api-access-898g2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.588955 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config" (OuterVolumeSpecName: "config") pod "0c6654bc-c2b5-45a0-93ab-c338beb90d3c" (UID: "0c6654bc-c2b5-45a0-93ab-c338beb90d3c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.594609 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-898g2\" (UniqueName: \"kubernetes.io/projected/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-kube-api-access-898g2\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.594637 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.632186 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0c6654bc-c2b5-45a0-93ab-c338beb90d3c" (UID: "0c6654bc-c2b5-45a0-93ab-c338beb90d3c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.684919 4903 scope.go:117] "RemoveContainer" containerID="7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.698022 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c6654bc-c2b5-45a0-93ab-c338beb90d3c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.713881 4903 scope.go:117] "RemoveContainer" containerID="ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c" Nov 26 22:41:03 crc kubenswrapper[4903]: E1126 22:41:03.720252 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c\": container with ID starting with ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c not found: ID does not exist" containerID="ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.720309 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c"} err="failed to get container status \"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c\": rpc error: code = NotFound desc = could not find container \"ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c\": container with ID starting with ab45a90e503b07f98ea3393a14e6933f48f3a82ac505b1800996d6c6b966457c not found: ID does not exist" Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.720334 4903 scope.go:117] "RemoveContainer" containerID="7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494" Nov 26 22:41:03 crc kubenswrapper[4903]: E1126 22:41:03.720772 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494\": container with ID 
Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.720815 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494"} err="failed to get container status \"7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494\": rpc error: code = NotFound desc = could not find container \"7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494\": container with ID starting with 7740e5855e741a31e18e489b7abeef68e7faa5bce40ce28530f9421b07e99494 not found: ID does not exist"
Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.787848 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"]
Nov 26 22:41:03 crc kubenswrapper[4903]: I1126 22:41:03.812306 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-b82qc"]
Nov 26 22:41:04 crc kubenswrapper[4903]: I1126 22:41:04.045450 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" path="/var/lib/kubelet/pods/0c6654bc-c2b5-45a0-93ab-c338beb90d3c/volumes"
Nov 26 22:41:04 crc kubenswrapper[4903]: I1126 22:41:04.047329 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ef46889-0257-408b-8f72-b8985eacb494" path="/var/lib/kubelet/pods/3ef46889-0257-408b-8f72-b8985eacb494/volumes"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.551853 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-tz77l"]
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552644 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="init"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552656 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="init"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552671 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29ec5ffe-92c2-4f30-8477-913d14b49415" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552679 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="29ec5ffe-92c2-4f30-8477-913d14b49415" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552708 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84562f82-5408-4031-be94-2933a87dd5b0" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552714 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="84562f82-5408-4031-be94-2933a87dd5b0" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552728 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="dnsmasq-dns"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552734 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="dnsmasq-dns"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552749 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f687978-0ef1-4061-98ed-a8684824ece8" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552755 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f687978-0ef1-4061-98ed-a8684824ece8" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552769 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ef46889-0257-408b-8f72-b8985eacb494" containerName="console"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552775 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ef46889-0257-408b-8f72-b8985eacb494" containerName="console"
Nov 26 22:41:05 crc kubenswrapper[4903]: E1126 22:41:05.552787 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552793 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.552998 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ef46889-0257-408b-8f72-b8985eacb494" containerName="console"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553024 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553039 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c6654bc-c2b5-45a0-93ab-c338beb90d3c" containerName="dnsmasq-dns"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553048 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="29ec5ffe-92c2-4f30-8477-913d14b49415" containerName="mariadb-account-create-update"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553059 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="84562f82-5408-4031-be94-2933a87dd5b0" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553070 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f687978-0ef1-4061-98ed-a8684824ece8" containerName="mariadb-database-create"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.553815 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.555934 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.556123 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-m2kjp"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.559842 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tz77l"]
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.742800 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.742850 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74hsf\" (UniqueName: \"kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.742927 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.743071 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.844914 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.844982 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.845083 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.845108 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74hsf\" (UniqueName: \"kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.851043 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.851058 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.855232 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.861446 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74hsf\" (UniqueName: \"kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf\") pod \"glance-db-sync-tz77l\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.876241 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tz77l"
Nov 26 22:41:05 crc kubenswrapper[4903]: I1126 22:41:05.961479 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-w8mjj"
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.010078 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-78f3-account-create-update-rtnpt"
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.021173 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-19c9-account-create-update-fkslg"
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.069208 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pd66t"
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152089 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts\") pod \"7208d6af-3e83-4569-b654-20c73205bab5\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts\") pod \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152200 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flzx2\" (UniqueName: \"kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2\") pod \"7208d6af-3e83-4569-b654-20c73205bab5\" (UID: \"7208d6af-3e83-4569-b654-20c73205bab5\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152247 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqsv8\" (UniqueName: \"kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8\") pod \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152325 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts\") pod \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\" (UID: \"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.152412 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfjds\" (UniqueName: \"kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds\") pod \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\" (UID: \"bcf866d6-b692-4e5c-bdc0-cf6569ecd016\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.155153 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7208d6af-3e83-4569-b654-20c73205bab5" (UID: "7208d6af-3e83-4569-b654-20c73205bab5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.155480 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bcf866d6-b692-4e5c-bdc0-cf6569ecd016" (UID: "bcf866d6-b692-4e5c-bdc0-cf6569ecd016"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.156952 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" (UID: "f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.163712 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2" (OuterVolumeSpecName: "kube-api-access-flzx2") pod "7208d6af-3e83-4569-b654-20c73205bab5" (UID: "7208d6af-3e83-4569-b654-20c73205bab5"). InnerVolumeSpecName "kube-api-access-flzx2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.163770 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds" (OuterVolumeSpecName: "kube-api-access-pfjds") pod "bcf866d6-b692-4e5c-bdc0-cf6569ecd016" (UID: "bcf866d6-b692-4e5c-bdc0-cf6569ecd016"). InnerVolumeSpecName "kube-api-access-pfjds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.163844 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8" (OuterVolumeSpecName: "kube-api-access-lqsv8") pod "f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" (UID: "f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216"). InnerVolumeSpecName "kube-api-access-lqsv8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.254847 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2rhk\" (UniqueName: \"kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk\") pod \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.254981 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts\") pod \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\" (UID: \"5fcf8863-3ab3-4617-9d3e-41256bf1c3de\") "
Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.255616 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5fcf8863-3ab3-4617-9d3e-41256bf1c3de" (UID: "5fcf8863-3ab3-4617-9d3e-41256bf1c3de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256016 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfjds\" (UniqueName: \"kubernetes.io/projected/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-kube-api-access-pfjds\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256039 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256055 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7208d6af-3e83-4569-b654-20c73205bab5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256072 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bcf866d6-b692-4e5c-bdc0-cf6569ecd016-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256088 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flzx2\" (UniqueName: \"kubernetes.io/projected/7208d6af-3e83-4569-b654-20c73205bab5-kube-api-access-flzx2\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256107 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqsv8\" (UniqueName: \"kubernetes.io/projected/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-kube-api-access-lqsv8\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.256123 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.258892 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk" (OuterVolumeSpecName: "kube-api-access-z2rhk") pod "5fcf8863-3ab3-4617-9d3e-41256bf1c3de" (UID: "5fcf8863-3ab3-4617-9d3e-41256bf1c3de"). InnerVolumeSpecName "kube-api-access-z2rhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.358186 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2rhk\" (UniqueName: \"kubernetes.io/projected/5fcf8863-3ab3-4617-9d3e-41256bf1c3de-kube-api-access-z2rhk\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.555224 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-78f3-account-create-update-rtnpt" event={"ID":"bcf866d6-b692-4e5c-bdc0-cf6569ecd016","Type":"ContainerDied","Data":"b0cd632055a00f00e619dca54bbcb356e8377d81ab92bc03a70d103289e86ecc"} Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.555281 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0cd632055a00f00e619dca54bbcb356e8377d81ab92bc03a70d103289e86ecc" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.555369 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-78f3-account-create-update-rtnpt" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.556921 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-w8mjj" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.556921 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-w8mjj" event={"ID":"f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216","Type":"ContainerDied","Data":"e9d70dfb24098eec5a1a9245df79de5f9f2cbb6d36015eac54fe7aaf6e4efe6a"} Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.557029 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9d70dfb24098eec5a1a9245df79de5f9f2cbb6d36015eac54fe7aaf6e4efe6a" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.559191 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerStarted","Data":"4903447e5ba140e892d2c054b0a62788eae2b2ad99a4b2788c389b81207df939"} Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.564946 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pd66t" event={"ID":"5fcf8863-3ab3-4617-9d3e-41256bf1c3de","Type":"ContainerDied","Data":"ddcc8a007b5a6bccba404bd44c16370475177c48d6b8b92a09ca45fcba73fde3"} Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.564985 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddcc8a007b5a6bccba404bd44c16370475177c48d6b8b92a09ca45fcba73fde3" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.564956 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pd66t" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.565113 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-kzb8j" podUID="1aa29ea2-aaab-435e-9995-41a5f137be03" containerName="ovn-controller" probeResult="failure" output=< Nov 26 22:41:06 crc kubenswrapper[4903]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 22:41:06 crc kubenswrapper[4903]: > Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.566648 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-19c9-account-create-update-fkslg" event={"ID":"7208d6af-3e83-4569-b654-20c73205bab5","Type":"ContainerDied","Data":"b47423c73b8dab9c48cfc5c243371056acd9ef8933c18bc0c7ab67261270db1d"} Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.566669 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b47423c73b8dab9c48cfc5c243371056acd9ef8933c18bc0c7ab67261270db1d" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.566726 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-19c9-account-create-update-fkslg" Nov 26 22:41:06 crc kubenswrapper[4903]: I1126 22:41:06.582256 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=13.302568378 podStartE2EDuration="54.582241255s" podCreationTimestamp="2025-11-26 22:40:12 +0000 UTC" firstStartedPulling="2025-11-26 22:40:24.625012859 +0000 UTC m=+1153.315247769" lastFinishedPulling="2025-11-26 22:41:05.904685736 +0000 UTC m=+1194.594920646" observedRunningTime="2025-11-26 22:41:06.580179761 +0000 UTC m=+1195.270414681" watchObservedRunningTime="2025-11-26 22:41:06.582241255 +0000 UTC m=+1195.272476165" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.220471 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tz77l"] Nov 26 22:41:07 crc kubenswrapper[4903]: W1126 22:41:07.223178 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2388445a_1656_41aa_8daa_a120993c24ad.slice/crio-7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931 WatchSource:0}: Error finding container 7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931: Status 404 returned error can't find the container with id 7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931 Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.581085 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tz77l" event={"ID":"2388445a-1656-41aa-8daa-a120993c24ad","Type":"ContainerStarted","Data":"7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931"} Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.690892 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-pbphv"] Nov 26 22:41:07 crc kubenswrapper[4903]: E1126 22:41:07.691375 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fcf8863-3ab3-4617-9d3e-41256bf1c3de" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.691392 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fcf8863-3ab3-4617-9d3e-41256bf1c3de" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: E1126 22:41:07.691410 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.691418 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: E1126 22:41:07.691444 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7208d6af-3e83-4569-b654-20c73205bab5" containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.691452 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7208d6af-3e83-4569-b654-20c73205bab5" containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: E1126 22:41:07.691472 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcf866d6-b692-4e5c-bdc0-cf6569ecd016" containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.691481 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcf866d6-b692-4e5c-bdc0-cf6569ecd016" 
containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.691908 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.692126 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7208d6af-3e83-4569-b654-20c73205bab5" containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.692152 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcf866d6-b692-4e5c-bdc0-cf6569ecd016" containerName="mariadb-account-create-update" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.692163 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fcf8863-3ab3-4617-9d3e-41256bf1c3de" containerName="mariadb-database-create" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.693068 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.726079 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-pbphv"] Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.790781 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.790975 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk2xt\" (UniqueName: \"kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.893315 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.893569 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk2xt\" (UniqueName: \"kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.894161 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 
22:41:07.902292 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-f978-account-create-update-cxc2n"] Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.903857 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.910172 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.917550 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-f978-account-create-update-cxc2n"] Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.918975 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk2xt\" (UniqueName: \"kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt\") pod \"mysqld-exporter-openstack-cell1-db-create-pbphv\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.995455 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb5wh\" (UniqueName: \"kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:07 crc kubenswrapper[4903]: I1126 22:41:07.995664 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.015217 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.097565 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.097714 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb5wh\" (UniqueName: \"kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.098840 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.117522 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb5wh\" (UniqueName: \"kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh\") pod \"mysqld-exporter-f978-account-create-update-cxc2n\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.219009 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.536320 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.575390 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-pbphv"] Nov 26 22:41:08 crc kubenswrapper[4903]: W1126 22:41:08.587747 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7711917_d475_44bb_8393_a803e720f32d.slice/crio-948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41 WatchSource:0}: Error finding container 948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41: Status 404 returned error can't find the container with id 948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41 Nov 26 22:41:08 crc kubenswrapper[4903]: I1126 22:41:08.738993 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-f978-account-create-update-cxc2n"] Nov 26 22:41:08 crc kubenswrapper[4903]: W1126 22:41:08.743309 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ebba586_8f3a_4148_aec8_687eb566f1b0.slice/crio-6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff WatchSource:0}: Error finding container 6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff: Status 404 returned error can't find the container with id 6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.426531 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.433789 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1f34b822-e8fa-4f6d-b793-01d0e80ccb06-etc-swift\") pod \"swift-storage-0\" (UID: \"1f34b822-e8fa-4f6d-b793-01d0e80ccb06\") " pod="openstack/swift-storage-0" Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.599429 4903 generic.go:334] "Generic (PLEG): container finished" podID="f7711917-d475-44bb-8393-a803e720f32d" containerID="0ec011bf2c8ccdeb94b40891e819aa71331730ba9b78bad633720f4653f378ea" exitCode=0 Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.599520 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" event={"ID":"f7711917-d475-44bb-8393-a803e720f32d","Type":"ContainerDied","Data":"0ec011bf2c8ccdeb94b40891e819aa71331730ba9b78bad633720f4653f378ea"} Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.599572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" event={"ID":"f7711917-d475-44bb-8393-a803e720f32d","Type":"ContainerStarted","Data":"948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41"} Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.608384 4903 generic.go:334] "Generic (PLEG): container finished" podID="7ebba586-8f3a-4148-aec8-687eb566f1b0" 
containerID="50ae9f11d4069bdbd321bdedb9a5c1fa3807819bb9dbd8caa02117451dd0722a" exitCode=0 Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.608478 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" event={"ID":"7ebba586-8f3a-4148-aec8-687eb566f1b0","Type":"ContainerDied","Data":"50ae9f11d4069bdbd321bdedb9a5c1fa3807819bb9dbd8caa02117451dd0722a"} Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.608511 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" event={"ID":"7ebba586-8f3a-4148-aec8-687eb566f1b0","Type":"ContainerStarted","Data":"6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff"} Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.611659 4903 generic.go:334] "Generic (PLEG): container finished" podID="be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" containerID="f8129e80533f25e778d006f1a3b9cbb3e6ad4bab25565c3d58d92b2ddf019c04" exitCode=0 Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.611721 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-wp2ng" event={"ID":"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc","Type":"ContainerDied","Data":"f8129e80533f25e778d006f1a3b9cbb3e6ad4bab25565c3d58d92b2ddf019c04"} Nov 26 22:41:09 crc kubenswrapper[4903]: I1126 22:41:09.750247 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 22:41:10 crc kubenswrapper[4903]: I1126 22:41:10.346900 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 22:41:10 crc kubenswrapper[4903]: I1126 22:41:10.629191 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"5c7897aebc0bdc54cff8d2a664299c0cebbe66055ed20c6f29425e50b4ec82b1"} Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.157067 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.274030 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.276157 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286327 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286592 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286678 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286779 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286829 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbgcc\" (UniqueName: \"kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.286935 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.287065 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift\") pod \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\" (UID: \"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.289522 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.292087 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.305977 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc" (OuterVolumeSpecName: "kube-api-access-vbgcc") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "kube-api-access-vbgcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.319550 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.322550 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.333761 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.347082 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts" (OuterVolumeSpecName: "scripts") pod "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" (UID: "be81f20b-b9ca-44bf-8aad-2cd7a10e44cc"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.389470 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts\") pod \"7ebba586-8f3a-4148-aec8-687eb566f1b0\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.389642 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb5wh\" (UniqueName: \"kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh\") pod \"7ebba586-8f3a-4148-aec8-687eb566f1b0\" (UID: \"7ebba586-8f3a-4148-aec8-687eb566f1b0\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.389737 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk2xt\" (UniqueName: \"kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt\") pod \"f7711917-d475-44bb-8393-a803e720f32d\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.389764 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts\") pod \"f7711917-d475-44bb-8393-a803e720f32d\" (UID: \"f7711917-d475-44bb-8393-a803e720f32d\") " Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.390247 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7ebba586-8f3a-4148-aec8-687eb566f1b0" (UID: "7ebba586-8f3a-4148-aec8-687eb566f1b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.390465 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f7711917-d475-44bb-8393-a803e720f32d" (UID: "f7711917-d475-44bb-8393-a803e720f32d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.392992 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7711917-d475-44bb-8393-a803e720f32d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393022 4903 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393038 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7ebba586-8f3a-4148-aec8-687eb566f1b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393049 4903 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393057 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393066 4903 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393074 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393083 4903 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393092 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbgcc\" (UniqueName: \"kubernetes.io/projected/be81f20b-b9ca-44bf-8aad-2cd7a10e44cc-kube-api-access-vbgcc\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.393158 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt" (OuterVolumeSpecName: "kube-api-access-wk2xt") pod "f7711917-d475-44bb-8393-a803e720f32d" (UID: "f7711917-d475-44bb-8393-a803e720f32d"). InnerVolumeSpecName "kube-api-access-wk2xt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.394960 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh" (OuterVolumeSpecName: "kube-api-access-fb5wh") pod "7ebba586-8f3a-4148-aec8-687eb566f1b0" (UID: "7ebba586-8f3a-4148-aec8-687eb566f1b0"). InnerVolumeSpecName "kube-api-access-fb5wh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.494426 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk2xt\" (UniqueName: \"kubernetes.io/projected/f7711917-d475-44bb-8393-a803e720f32d-kube-api-access-wk2xt\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.494462 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb5wh\" (UniqueName: \"kubernetes.io/projected/7ebba586-8f3a-4148-aec8-687eb566f1b0-kube-api-access-fb5wh\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.567782 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-kzb8j" podUID="1aa29ea2-aaab-435e-9995-41a5f137be03" containerName="ovn-controller" probeResult="failure" output=< Nov 26 22:41:11 crc kubenswrapper[4903]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 22:41:11 crc kubenswrapper[4903]: > Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.582842 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.592610 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-mv2r4" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.654082 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" event={"ID":"f7711917-d475-44bb-8393-a803e720f32d","Type":"ContainerDied","Data":"948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41"} Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.654125 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="948cb80c16cf9d486b61f1f29f291a412d302884d31ca854613768307ace8a41" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.654189 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-pbphv" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.663027 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.663620 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-f978-account-create-update-cxc2n" event={"ID":"7ebba586-8f3a-4148-aec8-687eb566f1b0","Type":"ContainerDied","Data":"6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff"} Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.663664 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d2fc5eb920c7513bbb0345ba5b6a73fa7ad82407277718a8e0a9c017c35c6ff" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.675062 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-wp2ng" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.675125 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-wp2ng" event={"ID":"be81f20b-b9ca-44bf-8aad-2cd7a10e44cc","Type":"ContainerDied","Data":"86744a073028475bd70b98028922f20bf2964d15937ef1622958cad3ba1e3e07"} Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.675155 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86744a073028475bd70b98028922f20bf2964d15937ef1622958cad3ba1e3e07" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.802736 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-kzb8j-config-vcd4b"] Nov 26 22:41:11 crc kubenswrapper[4903]: E1126 22:41:11.803540 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ebba586-8f3a-4148-aec8-687eb566f1b0" containerName="mariadb-account-create-update" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803556 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ebba586-8f3a-4148-aec8-687eb566f1b0" containerName="mariadb-account-create-update" Nov 26 22:41:11 crc kubenswrapper[4903]: E1126 22:41:11.803592 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7711917-d475-44bb-8393-a803e720f32d" containerName="mariadb-database-create" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803601 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7711917-d475-44bb-8393-a803e720f32d" containerName="mariadb-database-create" Nov 26 22:41:11 crc kubenswrapper[4903]: E1126 22:41:11.803612 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" containerName="swift-ring-rebalance" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803618 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" containerName="swift-ring-rebalance" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803857 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ebba586-8f3a-4148-aec8-687eb566f1b0" containerName="mariadb-account-create-update" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803874 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7711917-d475-44bb-8393-a803e720f32d" containerName="mariadb-database-create" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.803890 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="be81f20b-b9ca-44bf-8aad-2cd7a10e44cc" containerName="swift-ring-rebalance" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.804736 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.806908 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 22:41:11 crc kubenswrapper[4903]: I1126 22:41:11.813166 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kzb8j-config-vcd4b"] Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006156 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006202 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006232 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006369 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47x7g\" (UniqueName: \"kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006419 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.006705 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.107916 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47x7g\" (UniqueName: \"kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108220 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108251 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108291 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108310 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108331 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108588 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108599 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.108613 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.111137 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.112266 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.121249 
4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.125411 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47x7g\" (UniqueName: \"kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g\") pod \"ovn-controller-kzb8j-config-vcd4b\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.136474 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.664880 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-kzb8j-config-vcd4b"] Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.694967 4903 scope.go:117] "RemoveContainer" containerID="4337628323fe2089317ee7e01e71f8a52d313ed38b96ce04e29ed2cdf09ba7b7" Nov 26 22:41:12 crc kubenswrapper[4903]: W1126 22:41:12.709798 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62e15a21_b3c3_43d1_b792_dde105c98f10.slice/crio-7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52 WatchSource:0}: Error finding container 7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52: Status 404 returned error can't find the container with id 7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52 Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.710300 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"77da1233496a170e4a65d953909bb881cb37b84f0518c8cd25b075f8fea246f0"} Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.710387 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"dc22fda6a223ea5d0b0f479b116e7f12a1651762e7ed00c13a33bb65c484f89f"} Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.710439 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"c999b50956d4e9008604db9a823b4b86f538b8bb57b159b68dff7d3067ea3203"} Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.814230 4903 scope.go:117] "RemoveContainer" containerID="782135ae538eea42fb48ff7ea916cd1133adcac31f66403acc864126cda113b3" Nov 26 22:41:12 crc kubenswrapper[4903]: I1126 22:41:12.869275 4903 scope.go:117] "RemoveContainer" containerID="c03cc5766080c6ef0bff977fa69dea5cd42092fc7624986dc88e3339c339f025" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.137683 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.153813 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.154073 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.155991 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.341467 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.341512 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.341663 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2kzp\" (UniqueName: \"kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.443527 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.443598 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.443745 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kzp\" (UniqueName: \"kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.449859 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.450406 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.462582 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kzp\" (UniqueName: \"kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp\") pod \"mysqld-exporter-0\" (UID: 
\"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.490916 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.536366 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.540752 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.730812 4903 generic.go:334] "Generic (PLEG): container finished" podID="62e15a21-b3c3-43d1-b792-dde105c98f10" containerID="1d049408fa0a73a548c742ac563b8c3a2115d0b9a16ccce00190d9939da1947f" exitCode=0 Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.730863 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kzb8j-config-vcd4b" event={"ID":"62e15a21-b3c3-43d1-b792-dde105c98f10","Type":"ContainerDied","Data":"1d049408fa0a73a548c742ac563b8c3a2115d0b9a16ccce00190d9939da1947f"} Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.730904 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-kzb8j-config-vcd4b" event={"ID":"62e15a21-b3c3-43d1-b792-dde105c98f10","Type":"ContainerStarted","Data":"7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52"} Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.737800 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"f3868623e3a8e74e683665f889a9607d12e41bb400797f7f1307b78c1f4cde1f"} Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.740416 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:13 crc kubenswrapper[4903]: I1126 22:41:13.994907 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:41:14 crc kubenswrapper[4903]: W1126 22:41:14.232934 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca375e39_4f68_4f91_be27_8b4975a0ea3c.slice/crio-3f7ea076d91bad3d6b64e9e90cb7a7f566df209950672b78483ed74b7445ab83 WatchSource:0}: Error finding container 3f7ea076d91bad3d6b64e9e90cb7a7f566df209950672b78483ed74b7445ab83: Status 404 returned error can't find the container with id 3f7ea076d91bad3d6b64e9e90cb7a7f566df209950672b78483ed74b7445ab83 Nov 26 22:41:14 crc kubenswrapper[4903]: I1126 22:41:14.763109 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ca375e39-4f68-4f91-be27-8b4975a0ea3c","Type":"ContainerStarted","Data":"3f7ea076d91bad3d6b64e9e90cb7a7f566df209950672b78483ed74b7445ab83"} Nov 26 22:41:14 crc kubenswrapper[4903]: I1126 22:41:14.768783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"db9a9826d88542f87ddabe367f3117581643afa8af8a7ec5cb3ee598aa531e5e"} Nov 26 22:41:14 crc kubenswrapper[4903]: I1126 22:41:14.768931 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"eb2d8bf82037db0d7bf7efa6f97c6129d2ff28e64550354bed4e1962b90eec7c"} Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.168415 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.310559 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.310733 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.310769 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.311417 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.311493 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47x7g\" (UniqueName: \"kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.311632 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn\") pod \"62e15a21-b3c3-43d1-b792-dde105c98f10\" (UID: \"62e15a21-b3c3-43d1-b792-dde105c98f10\") " Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.312154 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.312188 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run" (OuterVolumeSpecName: "var-run") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.312204 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.313136 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.313726 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts" (OuterVolumeSpecName: "scripts") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.336903 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g" (OuterVolumeSpecName: "kube-api-access-47x7g") pod "62e15a21-b3c3-43d1-b792-dde105c98f10" (UID: "62e15a21-b3c3-43d1-b792-dde105c98f10"). InnerVolumeSpecName "kube-api-access-47x7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413926 4903 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413964 4903 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413974 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62e15a21-b3c3-43d1-b792-dde105c98f10-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413982 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47x7g\" (UniqueName: \"kubernetes.io/projected/62e15a21-b3c3-43d1-b792-dde105c98f10-kube-api-access-47x7g\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413992 4903 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.413999 4903 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62e15a21-b3c3-43d1-b792-dde105c98f10-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.790351 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-kzb8j-config-vcd4b" event={"ID":"62e15a21-b3c3-43d1-b792-dde105c98f10","Type":"ContainerDied","Data":"7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52"} Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.790611 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b7e7eac8c8d4ba6cb3edcf780f0ad37ded85599bf46b6aa2a72fd8ba714ca52" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.790673 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-kzb8j-config-vcd4b" Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.806034 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"711b8687d2aa3487265629c2ed8d9f24bb5177ffa49a1248f0ae544c8074d291"} Nov 26 22:41:15 crc kubenswrapper[4903]: I1126 22:41:15.806075 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"04f1ccd3e74187b41090647742820b626d2e926c19f399801ecf6c7d1b935e6d"} Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.113782 4903 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod9cd97ea2-042b-4730-8f01-76fdff497904"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod9cd97ea2-042b-4730-8f01-76fdff497904] : Timed out while waiting for systemd to remove kubepods-besteffort-pod9cd97ea2_042b_4730_8f01_76fdff497904.slice" Nov 26 22:41:16 crc kubenswrapper[4903]: E1126 22:41:16.113842 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod9cd97ea2-042b-4730-8f01-76fdff497904] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod9cd97ea2-042b-4730-8f01-76fdff497904] : Timed out while waiting for systemd to remove kubepods-besteffort-pod9cd97ea2_042b_4730_8f01_76fdff497904.slice" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2" podUID="9cd97ea2-042b-4730-8f01-76fdff497904" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.267473 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-kzb8j-config-vcd4b"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.282848 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-kzb8j-config-vcd4b"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.516090 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.626652 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-kzb8j" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.813323 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-f8nz2" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.849577 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-f8nz2"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.861685 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-f8nz2"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.892431 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-s2ggb"] Nov 26 22:41:16 crc kubenswrapper[4903]: E1126 22:41:16.892901 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62e15a21-b3c3-43d1-b792-dde105c98f10" containerName="ovn-config" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.892920 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="62e15a21-b3c3-43d1-b792-dde105c98f10" containerName="ovn-config" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.893134 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="62e15a21-b3c3-43d1-b792-dde105c98f10" containerName="ovn-config" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.893859 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.903714 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s2ggb"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.937843 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.960999 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-6x6mf"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.961872 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.961945 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m7w8\" (UniqueName: \"kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.962236 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.973748 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-020d-account-create-update-ll6gr"] Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.975075 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.978042 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 22:41:16 crc kubenswrapper[4903]: I1126 22:41:16.984904 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-6x6mf"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.064891 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp7sg\" (UniqueName: \"kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.064968 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.065003 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwlzm\" (UniqueName: \"kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.065027 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.065065 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m7w8\" (UniqueName: \"kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.065108 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.066739 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.076292 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-020d-account-create-update-ll6gr"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.099126 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6m7w8\" (UniqueName: \"kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8\") pod \"barbican-db-create-s2ggb\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.106259 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-4fb4-account-create-update-gwgf9"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.107702 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.110433 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-4fb4-account-create-update-gwgf9"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.115033 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.163442 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-tl7bs"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.165430 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167078 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167180 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167207 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cqpt\" (UniqueName: \"kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167240 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp7sg\" (UniqueName: \"kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167299 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwlzm\" (UniqueName: \"kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.167324 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.168238 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.168974 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.170793 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.185111 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pdkfz" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.185422 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.185551 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.186498 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.186766 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" containerID="cri-o://a6a19313ea9c260a0231b410b7ade60ed9788d7161b5121cf0204087d4d1df8d" gracePeriod=600 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.186896 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="thanos-sidecar" containerID="cri-o://4903447e5ba140e892d2c054b0a62788eae2b2ad99a4b2788c389b81207df939" gracePeriod=600 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.186923 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="config-reloader" containerID="cri-o://dfd9e5a4795c099ca8187b350c89fd8ca7cb66598f49cca978ceb1d28588cb7e" gracePeriod=600 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.215635 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-ll4x9"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.222805 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.237855 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tl7bs"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.238479 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwlzm\" (UniqueName: \"kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm\") pod \"heat-db-create-6x6mf\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.241222 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp7sg\" (UniqueName: \"kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg\") pod \"cinder-020d-account-create-update-ll6gr\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.246989 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.251787 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ll4x9"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.270647 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.270774 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.270817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.270841 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cqpt\" (UniqueName: \"kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.270875 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbx46\" (UniqueName: \"kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.271685 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.289113 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-2908-account-create-update-dhdsh"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.291513 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.292263 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.299797 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.308204 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.311589 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2908-account-create-update-dhdsh"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.329490 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cqpt\" (UniqueName: \"kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt\") pod \"heat-4fb4-account-create-update-gwgf9\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.400873 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.401112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.401183 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.408446 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jll6q\" (UniqueName: \"kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.408682 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.408765 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8lsh\" (UniqueName: \"kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.408857 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbx46\" (UniqueName: \"kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.411395 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.429363 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.455316 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbx46\" (UniqueName: \"kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46\") pod \"keystone-db-sync-tl7bs\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.456768 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.496428 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-m6b9x"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.496897 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.497942 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.510910 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.510961 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jll6q\" (UniqueName: \"kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.511032 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8lsh\" (UniqueName: \"kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.511079 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.511663 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.512119 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.526704 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-m6b9x"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.547772 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8lsh\" (UniqueName: \"kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh\") pod \"cinder-db-create-ll4x9\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.561182 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jll6q\" (UniqueName: \"kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q\") pod \"barbican-2908-account-create-update-dhdsh\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.568161 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/neutron-f7fd-account-create-update-m5lqr"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.569498 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.573999 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.594458 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f7fd-account-create-update-m5lqr"] Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.619229 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.619336 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.619360 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbz7t\" (UniqueName: \"kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.619409 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlng9\" (UniqueName: \"kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.624068 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.721345 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.721463 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.721493 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbz7t\" (UniqueName: \"kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.721544 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlng9\" (UniqueName: \"kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.722147 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.722274 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.736899 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbz7t\" (UniqueName: \"kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t\") pod \"neutron-f7fd-account-create-update-m5lqr\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.741342 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlng9\" (UniqueName: \"kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9\") pod \"neutron-db-create-m6b9x\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.783464 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824616 4903 generic.go:334] "Generic (PLEG): container finished" podID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerID="4903447e5ba140e892d2c054b0a62788eae2b2ad99a4b2788c389b81207df939" exitCode=0 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824648 4903 generic.go:334] "Generic (PLEG): container finished" podID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerID="dfd9e5a4795c099ca8187b350c89fd8ca7cb66598f49cca978ceb1d28588cb7e" exitCode=0 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824657 4903 generic.go:334] "Generic (PLEG): container finished" podID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerID="a6a19313ea9c260a0231b410b7ade60ed9788d7161b5121cf0204087d4d1df8d" exitCode=0 Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824678 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerDied","Data":"4903447e5ba140e892d2c054b0a62788eae2b2ad99a4b2788c389b81207df939"} Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerDied","Data":"dfd9e5a4795c099ca8187b350c89fd8ca7cb66598f49cca978ceb1d28588cb7e"} Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.824738 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerDied","Data":"a6a19313ea9c260a0231b410b7ade60ed9788d7161b5121cf0204087d4d1df8d"} Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.889218 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:17 crc kubenswrapper[4903]: I1126 22:41:17.917666 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:18 crc kubenswrapper[4903]: I1126 22:41:18.040637 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62e15a21-b3c3-43d1-b792-dde105c98f10" path="/var/lib/kubelet/pods/62e15a21-b3c3-43d1-b792-dde105c98f10/volumes" Nov 26 22:41:18 crc kubenswrapper[4903]: I1126 22:41:18.041151 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cd97ea2-042b-4730-8f01-76fdff497904" path="/var/lib/kubelet/pods/9cd97ea2-042b-4730-8f01-76fdff497904/volumes" Nov 26 22:41:18 crc kubenswrapper[4903]: I1126 22:41:18.535770 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.137:9090/-/ready\": dial tcp 10.217.0.137:9090: connect: connection refused" Nov 26 22:41:23 crc kubenswrapper[4903]: I1126 22:41:23.536785 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.137:9090/-/ready\": dial tcp 10.217.0.137:9090: connect: connection refused" Nov 26 22:41:25 crc kubenswrapper[4903]: E1126 22:41:25.644412 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 26 22:41:25 crc kubenswrapper[4903]: E1126 22:41:25.645152 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-74hsf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-tz77l_openstack(2388445a-1656-41aa-8daa-a120993c24ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:41:25 crc kubenswrapper[4903]: E1126 22:41:25.648029 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-tz77l" podUID="2388445a-1656-41aa-8daa-a120993c24ad" Nov 26 22:41:25 crc kubenswrapper[4903]: E1126 22:41:25.914599 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-tz77l" podUID="2388445a-1656-41aa-8daa-a120993c24ad" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.234657 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341191 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341333 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341460 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341614 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341745 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.341873 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.342022 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.342214 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qn9bq\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq\") pod \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\" (UID: \"4d75ba9f-0873-4d65-b0c9-5347134bfcce\") " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.342283 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.342763 4903 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4d75ba9f-0873-4d65-b0c9-5347134bfcce-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.352680 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.354396 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config" (OuterVolumeSpecName: "config") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.354428 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out" (OuterVolumeSpecName: "config-out") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.356851 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq" (OuterVolumeSpecName: "kube-api-access-qn9bq") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "kube-api-access-qn9bq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.366541 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.378256 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "pvc-52882e42-0fcf-4f74-a4db-25081e0470db". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.385256 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config" (OuterVolumeSpecName: "web-config") pod "4d75ba9f-0873-4d65-b0c9-5347134bfcce" (UID: "4d75ba9f-0873-4d65-b0c9-5347134bfcce"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.446901 4903 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.446935 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qn9bq\" (UniqueName: \"kubernetes.io/projected/4d75ba9f-0873-4d65-b0c9-5347134bfcce-kube-api-access-qn9bq\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.446947 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.446955 4903 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-web-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.446989 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") on node \"crc\" " Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.447013 4903 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4d75ba9f-0873-4d65-b0c9-5347134bfcce-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.447025 4903 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4d75ba9f-0873-4d65-b0c9-5347134bfcce-config-out\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.475448 4903 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.475618 4903 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-52882e42-0fcf-4f74-a4db-25081e0470db" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db") on node "crc" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.548904 4903 reconciler_common.go:293] "Volume detached for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.935170 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"7950408ff2acaa82cb11b66eb15b9e0c9caf0ff301f0f80d3785991e68007362"} Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.937991 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ca375e39-4f68-4f91-be27-8b4975a0ea3c","Type":"ContainerStarted","Data":"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477"} Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.957818 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4d75ba9f-0873-4d65-b0c9-5347134bfcce","Type":"ContainerDied","Data":"496810aef529bd448d7f87ef2958c6a3af232a1ccdd4b59491215798143b4e40"} Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.958169 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.958152 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.457339506 podStartE2EDuration="13.958136543s" podCreationTimestamp="2025-11-26 22:41:13 +0000 UTC" firstStartedPulling="2025-11-26 22:41:14.23537 +0000 UTC m=+1202.925604910" lastFinishedPulling="2025-11-26 22:41:25.736167027 +0000 UTC m=+1214.426401947" observedRunningTime="2025-11-26 22:41:26.952129513 +0000 UTC m=+1215.642364413" watchObservedRunningTime="2025-11-26 22:41:26.958136543 +0000 UTC m=+1215.648371443" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.958559 4903 scope.go:117] "RemoveContainer" containerID="4903447e5ba140e892d2c054b0a62788eae2b2ad99a4b2788c389b81207df939" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.993920 4903 scope.go:117] "RemoveContainer" containerID="dfd9e5a4795c099ca8187b350c89fd8ca7cb66598f49cca978ceb1d28588cb7e" Nov 26 22:41:26 crc kubenswrapper[4903]: I1126 22:41:26.996705 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.031289 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.059348 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:27 crc kubenswrapper[4903]: E1126 22:41:27.059821 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.059841 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" 
Nov 26 22:41:27 crc kubenswrapper[4903]: E1126 22:41:27.059858 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="config-reloader" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.059864 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="config-reloader" Nov 26 22:41:27 crc kubenswrapper[4903]: E1126 22:41:27.059896 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="thanos-sidecar" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.059902 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="thanos-sidecar" Nov 26 22:41:27 crc kubenswrapper[4903]: E1126 22:41:27.059921 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="init-config-reloader" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.059927 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="init-config-reloader" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.060110 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="config-reloader" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.060130 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="thanos-sidecar" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.060142 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" containerName="prometheus" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.062123 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.077979 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.080733 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.080850 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.081098 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jvntk" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.081198 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.083328 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.083623 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.083841 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.122861 4903 scope.go:117] "RemoveContainer" containerID="a6a19313ea9c260a0231b410b7ade60ed9788d7161b5121cf0204087d4d1df8d" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.149309 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-6x6mf"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.167334 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2908-account-create-update-dhdsh"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.167975 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168041 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168070 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168089 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168119 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168160 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a1ef57dd-556d-40c3-8691-c7e55171a7a6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168205 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168242 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168281 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168322 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.168527 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tf9n7\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-kube-api-access-tf9n7\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.212970 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-4fb4-account-create-update-gwgf9"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270044 4903 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270267 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270350 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270482 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270584 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a1ef57dd-556d-40c3-8691-c7e55171a7a6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270713 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270798 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.270898 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.278013 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a1ef57dd-556d-40c3-8691-c7e55171a7a6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 
crc kubenswrapper[4903]: I1126 22:41:27.280033 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.280216 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tf9n7\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-kube-api-access-tf9n7\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.280390 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.285487 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.287793 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a1ef57dd-556d-40c3-8691-c7e55171a7a6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.292965 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.294801 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.299812 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.301240 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.306509 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.307018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a1ef57dd-556d-40c3-8691-c7e55171a7a6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.307413 4903 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.307455 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/381a5ecb46bedd79ebe106a71ed6a8c447ce6be192d3691459f97b3265cfc441/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.331664 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-020d-account-create-update-ll6gr"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.391760 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s2ggb"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.396185 4903 scope.go:117] "RemoveContainer" containerID="dac4b31325676ccfbae2e456949bd0270301da2bf2abc00388945a4b26c423e4" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.407477 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tf9n7\" (UniqueName: \"kubernetes.io/projected/a1ef57dd-556d-40c3-8691-c7e55171a7a6-kube-api-access-tf9n7\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.417647 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f7fd-account-create-update-m5lqr"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.476851 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-ll4x9"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.477031 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-52882e42-0fcf-4f74-a4db-25081e0470db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-52882e42-0fcf-4f74-a4db-25081e0470db\") pod \"prometheus-metric-storage-0\" (UID: \"a1ef57dd-556d-40c3-8691-c7e55171a7a6\") " pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.497880 4903 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-m6b9x"] Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.504824 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tl7bs"] Nov 26 22:41:27 crc kubenswrapper[4903]: W1126 22:41:27.567811 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b33485e_2d65_4308_aee1_eb14a019f91f.slice/crio-758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365 WatchSource:0}: Error finding container 758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365: Status 404 returned error can't find the container with id 758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365 Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.600453 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.601808 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.971443 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-6x6mf" event={"ID":"f4f63771-5b16-4801-b549-f51085e05d23","Type":"ContainerStarted","Data":"f5fd336c75ab8f2a3d0c6e5cf9b86ec7682fe7225baf5c22c2306328a7b12338"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.974322 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f7fd-account-create-update-m5lqr" event={"ID":"2b33485e-2d65-4308-aee1-eb14a019f91f","Type":"ContainerStarted","Data":"758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.975419 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ll4x9" event={"ID":"55b6735a-7c30-4cf1-86a9-d61e408ee84d","Type":"ContainerStarted","Data":"ffeaf7b7da8f65caae6daf559510bac026d35c1664c2fbdd2af8349d87e79d41"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.976361 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4fb4-account-create-update-gwgf9" event={"ID":"120af257-f4df-4cb8-ab06-baa9eaaab9b6","Type":"ContainerStarted","Data":"feaf95983911826bb08fe25d3fd70fec3849001235418f02143d0e90d0db145f"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.978037 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s2ggb" event={"ID":"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd","Type":"ContainerStarted","Data":"f84922ed86a78178a4fb18a561e476b22046deaba40ce96ca7f470a04d50c95b"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.982334 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"d3658c4c5ae88ea942c8bbc9c055fc45535341c6fd366a00e0f63e902d7791c8"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.982354 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"9abe2cfd4d39a1a86561314ebccb47c9035bbc9f77d58499a7984da62d6973d2"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.983335 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m6b9x" 
event={"ID":"66d689d6-dee8-4ed9-a354-343757962010","Type":"ContainerStarted","Data":"d00a41f8be9bf3985d0beed9c84375003c474d21496fb37f977af357ef5a250f"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.991968 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tl7bs" event={"ID":"bfa72389-fa99-4e21-95c4-ca6a19783753","Type":"ContainerStarted","Data":"75756d4149052e8ac2a542d9b889a11fe8201d4fc81da187765eb464fb050254"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.993886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2908-account-create-update-dhdsh" event={"ID":"7a15d74f-ad69-40ad-a811-3de51ff0f4e9","Type":"ContainerStarted","Data":"b152d2e1e4384dc39de638efa69e4e69c879d8078ef9f3aa5228053cf3d83394"} Nov 26 22:41:27 crc kubenswrapper[4903]: I1126 22:41:27.995619 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-020d-account-create-update-ll6gr" event={"ID":"41b40a6f-4842-4a24-8cff-cf57fd96bfdd","Type":"ContainerStarted","Data":"8cbb03f0f7530c3d1cc28076bcb4ea588a6236e83c8f1bd15ca12f457822f1a4"} Nov 26 22:41:28 crc kubenswrapper[4903]: I1126 22:41:28.046477 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d75ba9f-0873-4d65-b0c9-5347134bfcce" path="/var/lib/kubelet/pods/4d75ba9f-0873-4d65-b0c9-5347134bfcce/volumes" Nov 26 22:41:28 crc kubenswrapper[4903]: I1126 22:41:28.224527 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.007768 4903 generic.go:334] "Generic (PLEG): container finished" podID="2b33485e-2d65-4308-aee1-eb14a019f91f" containerID="473a4cea01238ffc41e1b8232d0b8584e58cde61ee94ea756646bcde3efead1c" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.007894 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f7fd-account-create-update-m5lqr" event={"ID":"2b33485e-2d65-4308-aee1-eb14a019f91f","Type":"ContainerDied","Data":"473a4cea01238ffc41e1b8232d0b8584e58cde61ee94ea756646bcde3efead1c"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.010117 4903 generic.go:334] "Generic (PLEG): container finished" podID="120af257-f4df-4cb8-ab06-baa9eaaab9b6" containerID="22ba27d2a1d072ac91ddf35b5ee76a0cde90ab56360b4fa63d07560ce01e7b44" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.010222 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4fb4-account-create-update-gwgf9" event={"ID":"120af257-f4df-4cb8-ab06-baa9eaaab9b6","Type":"ContainerDied","Data":"22ba27d2a1d072ac91ddf35b5ee76a0cde90ab56360b4fa63d07560ce01e7b44"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.014626 4903 generic.go:334] "Generic (PLEG): container finished" podID="55b6735a-7c30-4cf1-86a9-d61e408ee84d" containerID="35d0786e86e60a2ab07701ebf120fb73fd9e1f1698cd3c3ad03a5ebe9e546312" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.014728 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ll4x9" event={"ID":"55b6735a-7c30-4cf1-86a9-d61e408ee84d","Type":"ContainerDied","Data":"35d0786e86e60a2ab07701ebf120fb73fd9e1f1698cd3c3ad03a5ebe9e546312"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.016222 4903 generic.go:334] "Generic (PLEG): container finished" podID="7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" containerID="ea263a427bb296fe4e58bbadcd1a5364cf51a85a387f886157cb91052f292f0f" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 
22:41:29.016268 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s2ggb" event={"ID":"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd","Type":"ContainerDied","Data":"ea263a427bb296fe4e58bbadcd1a5364cf51a85a387f886157cb91052f292f0f"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.031330 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"d8f1b75114ea10e99d8428ad6cad7462af144a8583a5f133428f26a110607a84"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.035765 4903 generic.go:334] "Generic (PLEG): container finished" podID="66d689d6-dee8-4ed9-a354-343757962010" containerID="eb3517d9cda792ee8700678cc808637d2da9a8b9a196fce3c2293d2a72c7ea99" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.035948 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m6b9x" event={"ID":"66d689d6-dee8-4ed9-a354-343757962010","Type":"ContainerDied","Data":"eb3517d9cda792ee8700678cc808637d2da9a8b9a196fce3c2293d2a72c7ea99"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.038190 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4f63771-5b16-4801-b549-f51085e05d23" containerID="937419d1cfa9dfaaf13f9e87289734539342fe8a3e29b1c2684a29052ff90a6b" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.038251 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-6x6mf" event={"ID":"f4f63771-5b16-4801-b549-f51085e05d23","Type":"ContainerDied","Data":"937419d1cfa9dfaaf13f9e87289734539342fe8a3e29b1c2684a29052ff90a6b"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.039702 4903 generic.go:334] "Generic (PLEG): container finished" podID="7a15d74f-ad69-40ad-a811-3de51ff0f4e9" containerID="95bd9933a77da21c07c5b067b65641174939d8e3854d10c964a5e63ed44bb7a0" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.039770 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2908-account-create-update-dhdsh" event={"ID":"7a15d74f-ad69-40ad-a811-3de51ff0f4e9","Type":"ContainerDied","Data":"95bd9933a77da21c07c5b067b65641174939d8e3854d10c964a5e63ed44bb7a0"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.040830 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerStarted","Data":"3b3c227f4543394bd9e27bee8c507c4871d88dcd1176ad891ead88654e052aa3"} Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.042015 4903 generic.go:334] "Generic (PLEG): container finished" podID="41b40a6f-4842-4a24-8cff-cf57fd96bfdd" containerID="2c4bf1480b174afe7951f003504b452575df913d2c0f2dbe00a7551777d82b08" exitCode=0 Nov 26 22:41:29 crc kubenswrapper[4903]: I1126 22:41:29.042046 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-020d-account-create-update-ll6gr" event={"ID":"41b40a6f-4842-4a24-8cff-cf57fd96bfdd","Type":"ContainerDied","Data":"2c4bf1480b174afe7951f003504b452575df913d2c0f2dbe00a7551777d82b08"} Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.096223 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"12dce6870bf793dd9db67cce5d1d85b131fba1d710feb228287f4eac712b7e55"} Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.096570 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"2b47ed7231f093275a209204e39f96c965ec474a0af965d35aa70cbfcce34111"} Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.096582 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1f34b822-e8fa-4f6d-b793-01d0e80ccb06","Type":"ContainerStarted","Data":"b28386e148950f89af2e61c57b816ea2a86abd0649f4e4960848b99d015d741e"} Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.188784 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=22.341069008 podStartE2EDuration="38.188766785s" podCreationTimestamp="2025-11-26 22:40:52 +0000 UTC" firstStartedPulling="2025-11-26 22:41:10.36235218 +0000 UTC m=+1199.052587080" lastFinishedPulling="2025-11-26 22:41:26.210049947 +0000 UTC m=+1214.900284857" observedRunningTime="2025-11-26 22:41:30.17959333 +0000 UTC m=+1218.869828240" watchObservedRunningTime="2025-11-26 22:41:30.188766785 +0000 UTC m=+1218.879001695" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.535894 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.540789 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.542762 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.551394 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.558909 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.687450 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts\") pod \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.687673 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m7w8\" (UniqueName: \"kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8\") pod \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\" (UID: \"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd\") " Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.687994 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.688015 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.688053 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.688116 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z295k\" (UniqueName: \"kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.688140 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.688168 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.689491 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" (UID: 
"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.708896 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8" (OuterVolumeSpecName: "kube-api-access-6m7w8") pod "7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" (UID: "7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd"). InnerVolumeSpecName "kube-api-access-6m7w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790040 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790073 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790173 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z295k\" (UniqueName: \"kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790243 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790275 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790345 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m7w8\" (UniqueName: \"kubernetes.io/projected/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-kube-api-access-6m7w8\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.790356 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.791015 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.791197 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.791590 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.793044 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.793303 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.805827 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z295k\" (UniqueName: \"kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k\") pod \"dnsmasq-dns-764c5664d7-f9gqr\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:30 crc kubenswrapper[4903]: I1126 22:41:30.858868 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:31 crc kubenswrapper[4903]: I1126 22:41:31.113764 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-s2ggb" Nov 26 22:41:31 crc kubenswrapper[4903]: I1126 22:41:31.113835 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s2ggb" event={"ID":"7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd","Type":"ContainerDied","Data":"f84922ed86a78178a4fb18a561e476b22046deaba40ce96ca7f470a04d50c95b"} Nov 26 22:41:31 crc kubenswrapper[4903]: I1126 22:41:31.113874 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f84922ed86a78178a4fb18a561e476b22046deaba40ce96ca7f470a04d50c95b" Nov 26 22:41:31 crc kubenswrapper[4903]: I1126 22:41:31.981775 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:41:31 crc kubenswrapper[4903]: I1126 22:41:31.982118 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.144539 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerStarted","Data":"da1d34743605b28ebf296e3458825547a98c9007de785a7429ce042c68b34047"} Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.945979 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.971654 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts\") pod \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.972978 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "41b40a6f-4842-4a24-8cff-cf57fd96bfdd" (UID: "41b40a6f-4842-4a24-8cff-cf57fd96bfdd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.973093 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gp7sg\" (UniqueName: \"kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg\") pod \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\" (UID: \"41b40a6f-4842-4a24-8cff-cf57fd96bfdd\") " Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.973873 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.974557 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.983214 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg" (OuterVolumeSpecName: "kube-api-access-gp7sg") pod "41b40a6f-4842-4a24-8cff-cf57fd96bfdd" (UID: "41b40a6f-4842-4a24-8cff-cf57fd96bfdd"). InnerVolumeSpecName "kube-api-access-gp7sg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:33 crc kubenswrapper[4903]: I1126 22:41:33.986017 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.045843 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.059547 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.072888 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.075242 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts\") pod \"f4f63771-5b16-4801-b549-f51085e05d23\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.075902 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts\") pod \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.076675 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts\") pod \"66d689d6-dee8-4ed9-a354-343757962010\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.076838 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cqpt\" (UniqueName: \"kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt\") pod \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.076985 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwlzm\" (UniqueName: \"kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm\") pod \"f4f63771-5b16-4801-b549-f51085e05d23\" (UID: \"f4f63771-5b16-4801-b549-f51085e05d23\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.077083 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts\") pod \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.077278 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8lsh\" (UniqueName: \"kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh\") pod \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\" (UID: \"55b6735a-7c30-4cf1-86a9-d61e408ee84d\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.077403 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jll6q\" (UniqueName: \"kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q\") pod \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\" (UID: \"7a15d74f-ad69-40ad-a811-3de51ff0f4e9\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.077522 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlng9\" (UniqueName: \"kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9\") pod \"66d689d6-dee8-4ed9-a354-343757962010\" (UID: \"66d689d6-dee8-4ed9-a354-343757962010\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.077744 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts\") pod \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\" (UID: \"120af257-f4df-4cb8-ab06-baa9eaaab9b6\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.075865 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4f63771-5b16-4801-b549-f51085e05d23" (UID: "f4f63771-5b16-4801-b549-f51085e05d23"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.076621 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55b6735a-7c30-4cf1-86a9-d61e408ee84d" (UID: "55b6735a-7c30-4cf1-86a9-d61e408ee84d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.078537 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "120af257-f4df-4cb8-ab06-baa9eaaab9b6" (UID: "120af257-f4df-4cb8-ab06-baa9eaaab9b6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.081494 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a15d74f-ad69-40ad-a811-3de51ff0f4e9" (UID: "7a15d74f-ad69-40ad-a811-3de51ff0f4e9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.083263 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66d689d6-dee8-4ed9-a354-343757962010" (UID: "66d689d6-dee8-4ed9-a354-343757962010"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.086656 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh" (OuterVolumeSpecName: "kube-api-access-q8lsh") pod "55b6735a-7c30-4cf1-86a9-d61e408ee84d" (UID: "55b6735a-7c30-4cf1-86a9-d61e408ee84d"). InnerVolumeSpecName "kube-api-access-q8lsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.087488 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gp7sg\" (UniqueName: \"kubernetes.io/projected/41b40a6f-4842-4a24-8cff-cf57fd96bfdd-kube-api-access-gp7sg\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.087814 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt" (OuterVolumeSpecName: "kube-api-access-8cqpt") pod "120af257-f4df-4cb8-ab06-baa9eaaab9b6" (UID: "120af257-f4df-4cb8-ab06-baa9eaaab9b6"). InnerVolumeSpecName "kube-api-access-8cqpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.092955 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm" (OuterVolumeSpecName: "kube-api-access-dwlzm") pod "f4f63771-5b16-4801-b549-f51085e05d23" (UID: "f4f63771-5b16-4801-b549-f51085e05d23"). InnerVolumeSpecName "kube-api-access-dwlzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.100157 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q" (OuterVolumeSpecName: "kube-api-access-jll6q") pod "7a15d74f-ad69-40ad-a811-3de51ff0f4e9" (UID: "7a15d74f-ad69-40ad-a811-3de51ff0f4e9"). InnerVolumeSpecName "kube-api-access-jll6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.101591 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.117922 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9" (OuterVolumeSpecName: "kube-api-access-nlng9") pod "66d689d6-dee8-4ed9-a354-343757962010" (UID: "66d689d6-dee8-4ed9-a354-343757962010"). InnerVolumeSpecName "kube-api-access-nlng9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.156542 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f7fd-account-create-update-m5lqr" event={"ID":"2b33485e-2d65-4308-aee1-eb14a019f91f","Type":"ContainerDied","Data":"758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.156586 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="758ebae096580181f6737fe2ade0de23426638e4426f2944ae8be70195490365" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.156581 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f7fd-account-create-update-m5lqr" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.158976 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-ll4x9" event={"ID":"55b6735a-7c30-4cf1-86a9-d61e408ee84d","Type":"ContainerDied","Data":"ffeaf7b7da8f65caae6daf559510bac026d35c1664c2fbdd2af8349d87e79d41"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.158998 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffeaf7b7da8f65caae6daf559510bac026d35c1664c2fbdd2af8349d87e79d41" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.159034 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-ll4x9" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.160389 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4fb4-account-create-update-gwgf9" event={"ID":"120af257-f4df-4cb8-ab06-baa9eaaab9b6","Type":"ContainerDied","Data":"feaf95983911826bb08fe25d3fd70fec3849001235418f02143d0e90d0db145f"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.160409 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="feaf95983911826bb08fe25d3fd70fec3849001235418f02143d0e90d0db145f" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.160428 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4fb4-account-create-update-gwgf9" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.162480 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m6b9x" event={"ID":"66d689d6-dee8-4ed9-a354-343757962010","Type":"ContainerDied","Data":"d00a41f8be9bf3985d0beed9c84375003c474d21496fb37f977af357ef5a250f"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.162503 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d00a41f8be9bf3985d0beed9c84375003c474d21496fb37f977af357ef5a250f" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.162548 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-m6b9x" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.163926 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tl7bs" event={"ID":"bfa72389-fa99-4e21-95c4-ca6a19783753","Type":"ContainerStarted","Data":"40fdf811f5bdfaf929daa3191de012ceb5d946f6491852ad2e0b1a7a7ddf68c2"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.167985 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-6x6mf" event={"ID":"f4f63771-5b16-4801-b549-f51085e05d23","Type":"ContainerDied","Data":"f5fd336c75ab8f2a3d0c6e5cf9b86ec7682fe7225baf5c22c2306328a7b12338"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.168011 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5fd336c75ab8f2a3d0c6e5cf9b86ec7682fe7225baf5c22c2306328a7b12338" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.168043 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-6x6mf" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.170241 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2908-account-create-update-dhdsh" event={"ID":"7a15d74f-ad69-40ad-a811-3de51ff0f4e9","Type":"ContainerDied","Data":"b152d2e1e4384dc39de638efa69e4e69c879d8078ef9f3aa5228053cf3d83394"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.170265 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b152d2e1e4384dc39de638efa69e4e69c879d8078ef9f3aa5228053cf3d83394" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.170262 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2908-account-create-update-dhdsh" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.173271 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-020d-account-create-update-ll6gr" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.173318 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-020d-account-create-update-ll6gr" event={"ID":"41b40a6f-4842-4a24-8cff-cf57fd96bfdd","Type":"ContainerDied","Data":"8cbb03f0f7530c3d1cc28076bcb4ea588a6236e83c8f1bd15ca12f457822f1a4"} Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.173478 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cbb03f0f7530c3d1cc28076bcb4ea588a6236e83c8f1bd15ca12f457822f1a4" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.188446 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbz7t\" (UniqueName: \"kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t\") pod \"2b33485e-2d65-4308-aee1-eb14a019f91f\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.188674 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts\") pod \"2b33485e-2d65-4308-aee1-eb14a019f91f\" (UID: \"2b33485e-2d65-4308-aee1-eb14a019f91f\") " Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189195 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2b33485e-2d65-4308-aee1-eb14a019f91f" (UID: "2b33485e-2d65-4308-aee1-eb14a019f91f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189354 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8lsh\" (UniqueName: \"kubernetes.io/projected/55b6735a-7c30-4cf1-86a9-d61e408ee84d-kube-api-access-q8lsh\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189369 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jll6q\" (UniqueName: \"kubernetes.io/projected/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-kube-api-access-jll6q\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189379 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlng9\" (UniqueName: \"kubernetes.io/projected/66d689d6-dee8-4ed9-a354-343757962010-kube-api-access-nlng9\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189389 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b33485e-2d65-4308-aee1-eb14a019f91f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189398 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120af257-f4df-4cb8-ab06-baa9eaaab9b6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189406 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4f63771-5b16-4801-b549-f51085e05d23-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189414 4903 reconciler_common.go:293] 
"Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55b6735a-7c30-4cf1-86a9-d61e408ee84d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189422 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66d689d6-dee8-4ed9-a354-343757962010-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189432 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cqpt\" (UniqueName: \"kubernetes.io/projected/120af257-f4df-4cb8-ab06-baa9eaaab9b6-kube-api-access-8cqpt\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189442 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwlzm\" (UniqueName: \"kubernetes.io/projected/f4f63771-5b16-4801-b549-f51085e05d23-kube-api-access-dwlzm\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.189450 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a15d74f-ad69-40ad-a811-3de51ff0f4e9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.192235 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t" (OuterVolumeSpecName: "kube-api-access-lbz7t") pod "2b33485e-2d65-4308-aee1-eb14a019f91f" (UID: "2b33485e-2d65-4308-aee1-eb14a019f91f"). InnerVolumeSpecName "kube-api-access-lbz7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.193183 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-tl7bs" podStartSLOduration=11.003124734 podStartE2EDuration="17.193165391s" podCreationTimestamp="2025-11-26 22:41:17 +0000 UTC" firstStartedPulling="2025-11-26 22:41:27.600924882 +0000 UTC m=+1216.291159782" lastFinishedPulling="2025-11-26 22:41:33.790965519 +0000 UTC m=+1222.481200439" observedRunningTime="2025-11-26 22:41:34.179431043 +0000 UTC m=+1222.869665953" watchObservedRunningTime="2025-11-26 22:41:34.193165391 +0000 UTC m=+1222.883400311" Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.245371 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:41:34 crc kubenswrapper[4903]: W1126 22:41:34.245753 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4d67314_0052_4f9e_9e9a_76f829dea702.slice/crio-e39ece1fc22b955fceab59fc67203814178ba68ec45e8adc037bc16635bf0d4c WatchSource:0}: Error finding container e39ece1fc22b955fceab59fc67203814178ba68ec45e8adc037bc16635bf0d4c: Status 404 returned error can't find the container with id e39ece1fc22b955fceab59fc67203814178ba68ec45e8adc037bc16635bf0d4c Nov 26 22:41:34 crc kubenswrapper[4903]: I1126 22:41:34.291276 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbz7t\" (UniqueName: \"kubernetes.io/projected/2b33485e-2d65-4308-aee1-eb14a019f91f-kube-api-access-lbz7t\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:35 crc kubenswrapper[4903]: I1126 22:41:35.185618 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4d67314-0052-4f9e-9e9a-76f829dea702" 
containerID="9661d7c6349fc333fc84235b786844dba28b19d35dcf6a92fcc9cedc6523d376" exitCode=0 Nov 26 22:41:35 crc kubenswrapper[4903]: I1126 22:41:35.186318 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" event={"ID":"f4d67314-0052-4f9e-9e9a-76f829dea702","Type":"ContainerDied","Data":"9661d7c6349fc333fc84235b786844dba28b19d35dcf6a92fcc9cedc6523d376"} Nov 26 22:41:35 crc kubenswrapper[4903]: I1126 22:41:35.186376 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" event={"ID":"f4d67314-0052-4f9e-9e9a-76f829dea702","Type":"ContainerStarted","Data":"e39ece1fc22b955fceab59fc67203814178ba68ec45e8adc037bc16635bf0d4c"} Nov 26 22:41:36 crc kubenswrapper[4903]: I1126 22:41:36.197271 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" event={"ID":"f4d67314-0052-4f9e-9e9a-76f829dea702","Type":"ContainerStarted","Data":"5348c272d0973eb267c4c74082e4d4fcada426624ae66fb34261132c7aeb3ec3"} Nov 26 22:41:36 crc kubenswrapper[4903]: I1126 22:41:36.197443 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:36 crc kubenswrapper[4903]: I1126 22:41:36.220557 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podStartSLOduration=6.220541387 podStartE2EDuration="6.220541387s" podCreationTimestamp="2025-11-26 22:41:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:36.213650433 +0000 UTC m=+1224.903885343" watchObservedRunningTime="2025-11-26 22:41:36.220541387 +0000 UTC m=+1224.910776297" Nov 26 22:41:40 crc kubenswrapper[4903]: I1126 22:41:40.242250 4903 generic.go:334] "Generic (PLEG): container finished" podID="a1ef57dd-556d-40c3-8691-c7e55171a7a6" containerID="da1d34743605b28ebf296e3458825547a98c9007de785a7429ce042c68b34047" exitCode=0 Nov 26 22:41:40 crc kubenswrapper[4903]: I1126 22:41:40.242459 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerDied","Data":"da1d34743605b28ebf296e3458825547a98c9007de785a7429ce042c68b34047"} Nov 26 22:41:40 crc kubenswrapper[4903]: I1126 22:41:40.860993 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:41:40 crc kubenswrapper[4903]: I1126 22:41:40.947456 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:41:40 crc kubenswrapper[4903]: I1126 22:41:40.947756 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-5vn9p" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="dnsmasq-dns" containerID="cri-o://b499924691b3488d080ff29b7d923feae092254ae6bad60d20573a2b0f2beac4" gracePeriod=10 Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.253816 4903 generic.go:334] "Generic (PLEG): container finished" podID="bfa72389-fa99-4e21-95c4-ca6a19783753" containerID="40fdf811f5bdfaf929daa3191de012ceb5d946f6491852ad2e0b1a7a7ddf68c2" exitCode=0 Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.253890 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tl7bs" 
event={"ID":"bfa72389-fa99-4e21-95c4-ca6a19783753","Type":"ContainerDied","Data":"40fdf811f5bdfaf929daa3191de012ceb5d946f6491852ad2e0b1a7a7ddf68c2"} Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.255466 4903 generic.go:334] "Generic (PLEG): container finished" podID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerID="b499924691b3488d080ff29b7d923feae092254ae6bad60d20573a2b0f2beac4" exitCode=0 Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.255526 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-5vn9p" event={"ID":"96fe1cef-e83b-48c2-8731-f0c74f19ce91","Type":"ContainerDied","Data":"b499924691b3488d080ff29b7d923feae092254ae6bad60d20573a2b0f2beac4"} Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.258963 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerStarted","Data":"4e2553b95300f1c1fab48338132ec2675379ddf9661065a3545f8fb5b74d76b5"} Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.488340 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.632702 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8cjv\" (UniqueName: \"kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv\") pod \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.632914 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb\") pod \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.632962 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb\") pod \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.633021 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc\") pod \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.633050 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config\") pod \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\" (UID: \"96fe1cef-e83b-48c2-8731-f0c74f19ce91\") " Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.650864 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv" (OuterVolumeSpecName: "kube-api-access-f8cjv") pod "96fe1cef-e83b-48c2-8731-f0c74f19ce91" (UID: "96fe1cef-e83b-48c2-8731-f0c74f19ce91"). InnerVolumeSpecName "kube-api-access-f8cjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.680368 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "96fe1cef-e83b-48c2-8731-f0c74f19ce91" (UID: "96fe1cef-e83b-48c2-8731-f0c74f19ce91"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.682322 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "96fe1cef-e83b-48c2-8731-f0c74f19ce91" (UID: "96fe1cef-e83b-48c2-8731-f0c74f19ce91"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.704500 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "96fe1cef-e83b-48c2-8731-f0c74f19ce91" (UID: "96fe1cef-e83b-48c2-8731-f0c74f19ce91"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.707944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config" (OuterVolumeSpecName: "config") pod "96fe1cef-e83b-48c2-8731-f0c74f19ce91" (UID: "96fe1cef-e83b-48c2-8731-f0c74f19ce91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.735323 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.735458 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.735511 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.735572 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8cjv\" (UniqueName: \"kubernetes.io/projected/96fe1cef-e83b-48c2-8731-f0c74f19ce91-kube-api-access-f8cjv\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:41 crc kubenswrapper[4903]: I1126 22:41:41.735627 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fe1cef-e83b-48c2-8731-f0c74f19ce91-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.272154 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-5vn9p" event={"ID":"96fe1cef-e83b-48c2-8731-f0c74f19ce91","Type":"ContainerDied","Data":"42e0cf19c1b6f510e2be0ec6c8fa79a1337725df2eee393aec718ae7dcf1718b"} Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.272399 4903 scope.go:117] "RemoveContainer" 
containerID="b499924691b3488d080ff29b7d923feae092254ae6bad60d20573a2b0f2beac4" Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.272358 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-5vn9p" Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.275103 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tz77l" event={"ID":"2388445a-1656-41aa-8daa-a120993c24ad","Type":"ContainerStarted","Data":"1a4c90f3dfd5e0cc46c98e62069ed051a120dbf3962c91a1f4f11c59a33a62cf"} Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.298666 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-tz77l" podStartSLOduration=4.00014915 podStartE2EDuration="37.298647579s" podCreationTimestamp="2025-11-26 22:41:05 +0000 UTC" firstStartedPulling="2025-11-26 22:41:07.225088646 +0000 UTC m=+1195.915323556" lastFinishedPulling="2025-11-26 22:41:40.523587075 +0000 UTC m=+1229.213821985" observedRunningTime="2025-11-26 22:41:42.29792416 +0000 UTC m=+1230.988159070" watchObservedRunningTime="2025-11-26 22:41:42.298647579 +0000 UTC m=+1230.988882489" Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.322732 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.330486 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-5vn9p"] Nov 26 22:41:42 crc kubenswrapper[4903]: I1126 22:41:42.332859 4903 scope.go:117] "RemoveContainer" containerID="f0b9d8b32473b2967c9f1661d8597d03dd65fb4934bff21ffaf523d317aa5871" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.003970 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.178121 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle\") pod \"bfa72389-fa99-4e21-95c4-ca6a19783753\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.178318 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data\") pod \"bfa72389-fa99-4e21-95c4-ca6a19783753\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.178405 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbx46\" (UniqueName: \"kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46\") pod \"bfa72389-fa99-4e21-95c4-ca6a19783753\" (UID: \"bfa72389-fa99-4e21-95c4-ca6a19783753\") " Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.186869 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46" (OuterVolumeSpecName: "kube-api-access-wbx46") pod "bfa72389-fa99-4e21-95c4-ca6a19783753" (UID: "bfa72389-fa99-4e21-95c4-ca6a19783753"). InnerVolumeSpecName "kube-api-access-wbx46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.227440 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfa72389-fa99-4e21-95c4-ca6a19783753" (UID: "bfa72389-fa99-4e21-95c4-ca6a19783753"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.275313 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data" (OuterVolumeSpecName: "config-data") pod "bfa72389-fa99-4e21-95c4-ca6a19783753" (UID: "bfa72389-fa99-4e21-95c4-ca6a19783753"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.281595 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.281665 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfa72389-fa99-4e21-95c4-ca6a19783753-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.282007 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbx46\" (UniqueName: \"kubernetes.io/projected/bfa72389-fa99-4e21-95c4-ca6a19783753-kube-api-access-wbx46\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.290255 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tl7bs" event={"ID":"bfa72389-fa99-4e21-95c4-ca6a19783753","Type":"ContainerDied","Data":"75756d4149052e8ac2a542d9b889a11fe8201d4fc81da187765eb464fb050254"} Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.290314 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75756d4149052e8ac2a542d9b889a11fe8201d4fc81da187765eb464fb050254" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.290392 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tl7bs" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.561476 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562125 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a15d74f-ad69-40ad-a811-3de51ff0f4e9" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562136 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a15d74f-ad69-40ad-a811-3de51ff0f4e9" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562152 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4f63771-5b16-4801-b549-f51085e05d23" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562158 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4f63771-5b16-4801-b549-f51085e05d23" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562180 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d689d6-dee8-4ed9-a354-343757962010" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562187 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d689d6-dee8-4ed9-a354-343757962010" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562202 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="dnsmasq-dns" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562208 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="dnsmasq-dns" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562220 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="init" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562226 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="init" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562238 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa72389-fa99-4e21-95c4-ca6a19783753" containerName="keystone-db-sync" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562243 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa72389-fa99-4e21-95c4-ca6a19783753" containerName="keystone-db-sync" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562251 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b6735a-7c30-4cf1-86a9-d61e408ee84d" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562257 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b6735a-7c30-4cf1-86a9-d61e408ee84d" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562274 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41b40a6f-4842-4a24-8cff-cf57fd96bfdd" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562280 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="41b40a6f-4842-4a24-8cff-cf57fd96bfdd" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562294 4903 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b33485e-2d65-4308-aee1-eb14a019f91f" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562300 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b33485e-2d65-4308-aee1-eb14a019f91f" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562310 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="120af257-f4df-4cb8-ab06-baa9eaaab9b6" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562316 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="120af257-f4df-4cb8-ab06-baa9eaaab9b6" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: E1126 22:41:43.562327 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562333 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562513 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4f63771-5b16-4801-b549-f51085e05d23" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562527 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562536 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b33485e-2d65-4308-aee1-eb14a019f91f" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562548 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="41b40a6f-4842-4a24-8cff-cf57fd96bfdd" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562560 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="120af257-f4df-4cb8-ab06-baa9eaaab9b6" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562570 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfa72389-fa99-4e21-95c4-ca6a19783753" containerName="keystone-db-sync" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562583 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d689d6-dee8-4ed9-a354-343757962010" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562592 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a15d74f-ad69-40ad-a811-3de51ff0f4e9" containerName="mariadb-account-create-update" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562600 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b6735a-7c30-4cf1-86a9-d61e408ee84d" containerName="mariadb-database-create" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.562608 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" containerName="dnsmasq-dns" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.566584 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.580832 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.606836 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-m4wm4"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.608685 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.611241 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.611478 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.612459 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pdkfz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.612628 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.612743 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.639884 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m4wm4"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.690351 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.690500 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncbzc\" (UniqueName: \"kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.690572 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.690718 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.690781 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " 
pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.691006 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.698583 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-dz7dx"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.700652 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.705498 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.705664 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9xvp4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.717332 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-dz7dx"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.759833 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-fxksz"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.761521 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.764500 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.764587 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.770373 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q7t2v" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.779287 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-fxksz"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792298 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792348 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pl4x\" (UniqueName: \"kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792375 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792424 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792446 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792474 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792493 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792518 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792546 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792570 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792592 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792635 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw6nm\" (UniqueName: \"kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792659 4903 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-ncbzc\" (UniqueName: \"kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792677 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.792724 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.793565 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.793661 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.797542 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-fv6wn"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.798928 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.803393 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.805212 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.805373 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.805474 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-lzclj" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.811841 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.812584 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.821540 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fv6wn"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.839478 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncbzc\" (UniqueName: \"kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc\") pod \"dnsmasq-dns-5959f8865f-kkpg2\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.888476 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896040 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26f6j\" (UniqueName: \"kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896088 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896146 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896166 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzhzr\" (UniqueName: \"kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896185 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896211 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw6nm\" (UniqueName: \"kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896238 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896260 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " 
pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896290 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896306 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896334 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896354 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pl4x\" (UniqueName: \"kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896396 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896413 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896437 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896461 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.896483 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.906436 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.922949 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-xhf2r"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.934067 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.935167 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.935829 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.937133 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw6nm\" (UniqueName: \"kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.943163 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.944581 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.944880 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.945146 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-w9p4f" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.962248 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.962311 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pl4x\" (UniqueName: \"kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.962484 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle\") pod \"keystone-bootstrap-m4wm4\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.963425 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle\") pod \"heat-db-sync-dz7dx\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.969680 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-xhf2r"] Nov 26 22:41:43 crc kubenswrapper[4903]: I1126 22:41:43.983268 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:43.999758 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003016 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003045 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003152 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003176 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26f6j\" (UniqueName: \"kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003245 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003266 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gngg6\" (UniqueName: \"kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " 
pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003283 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzhzr\" (UniqueName: \"kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003303 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003378 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003424 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003494 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003559 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.003579 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.009748 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.009914 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.016092 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.018333 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.019256 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.022367 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.026092 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.032066 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.033698 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.038403 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-dz7dx" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.044209 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26f6j\" (UniqueName: \"kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j\") pod \"cinder-db-sync-fxksz\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.050428 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzhzr\" (UniqueName: \"kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr\") pod \"neutron-db-sync-fv6wn\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.085862 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96fe1cef-e83b-48c2-8731-f0c74f19ce91" path="/var/lib/kubelet/pods/96fe1cef-e83b-48c2-8731-f0c74f19ce91/volumes" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.090669 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-fxksz" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.117734 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gngg6\" (UniqueName: \"kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.119936 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.120053 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.120556 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.122897 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.122942 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.122974 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.123064 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.123126 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.123196 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.124194 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7phz\" (UniqueName: \"kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.124350 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.137908 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.139747 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-w6n8h"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.151150 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.154197 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.154248 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-w6n8h"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.154647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gngg6\" (UniqueName: \"kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.155234 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.156521 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nc6m5" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.158016 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data\") pod \"placement-db-sync-xhf2r\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc 
kubenswrapper[4903]: I1126 22:41:44.165178 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.166489 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.230654 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231462 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231583 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231609 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231671 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231703 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdx64\" (UniqueName: \"kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231729 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231792 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231892 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: 
\"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.231937 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7phz\" (UniqueName: \"kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.232805 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.234161 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.236822 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.236998 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.243237 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.256836 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7phz\" (UniqueName: \"kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz\") pod \"dnsmasq-dns-58dd9ff6bc-htt6q\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.333131 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-xhf2r" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.336323 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.336419 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdx64\" (UniqueName: \"kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.336458 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.348875 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.353327 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.355467 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdx64\" (UniqueName: \"kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64\") pod \"barbican-db-sync-w6n8h\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.356597 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerStarted","Data":"3dcb7e5ec25d1e557d378380c2cf2001b4293e348dc44bbed7e62b71da591a70"} Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.356634 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a1ef57dd-556d-40c3-8691-c7e55171a7a6","Type":"ContainerStarted","Data":"481bc31c7ce4713ed4252f35383c35dd1070d8873dbd20ad6f4b577d15eb16db"} Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.370365 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.448320 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.448298383 podStartE2EDuration="18.448298383s" podCreationTimestamp="2025-11-26 22:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:44.430296891 +0000 UTC m=+1233.120531801" watchObservedRunningTime="2025-11-26 22:41:44.448298383 +0000 UTC m=+1233.138533293" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.518779 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.667615 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.883982 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.886677 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.891319 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.897195 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.913004 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-dz7dx"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.940753 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.949547 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.949678 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.953401 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.953434 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.953454 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.953541 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:44 crc kubenswrapper[4903]: I1126 22:41:44.953627 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84g89\" (UniqueName: \"kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.055853 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056265 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056340 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056428 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056518 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84g89\" (UniqueName: \"kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056596 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.056758 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 
22:41:45.061271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.061368 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.061800 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.064312 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.064809 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.065400 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.074226 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84g89\" (UniqueName: \"kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89\") pod \"ceilometer-0\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") " pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.226480 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-fv6wn"] Nov 26 22:41:45 crc kubenswrapper[4903]: W1126 22:41:45.230949 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode30967f0_d295_4017_a586_5b1afdbcd625.slice/crio-82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a WatchSource:0}: Error finding container 82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a: Status 404 returned error can't find the container with id 82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.233598 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.358544 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-fxksz"] Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.385676 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-dz7dx" event={"ID":"8a96189f-52eb-44aa-8638-96d516cd0eb3","Type":"ContainerStarted","Data":"de017ef6c413ae718d1a63c119f36cfb3643fa1ed01e649f6229c43acd47a77a"} Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.387912 4903 generic.go:334] "Generic (PLEG): container finished" podID="7951138f-c694-4178-82da-419ea357195a" containerID="947d7382f9492da8be10b127f673de6951a31176452832aa48bfa3410234db7d" exitCode=0 Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.388097 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" event={"ID":"7951138f-c694-4178-82da-419ea357195a","Type":"ContainerDied","Data":"947d7382f9492da8be10b127f673de6951a31176452832aa48bfa3410234db7d"} Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.388130 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" event={"ID":"7951138f-c694-4178-82da-419ea357195a","Type":"ContainerStarted","Data":"841a7a2e36feed9916accda54afc08bcf510f1632066fc3831f3bb5e4657b5f4"} Nov 26 22:41:45 crc kubenswrapper[4903]: W1126 22:41:45.399063 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0b2d5fd_9425_4082_b1cc_3ce796c82e0c.slice/crio-ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b WatchSource:0}: Error finding container ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b: Status 404 returned error can't find the container with id ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.408872 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fv6wn" event={"ID":"e30967f0-d295-4017-a586-5b1afdbcd625","Type":"ContainerStarted","Data":"82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a"} Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.410203 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m4wm4"] Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.474038 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-xhf2r"] Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.504006 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.575893 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-w6n8h"] Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.951262 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995194 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995261 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995350 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995398 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995522 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncbzc\" (UniqueName: \"kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:45 crc kubenswrapper[4903]: I1126 22:41:45.995565 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config\") pod \"7951138f-c694-4178-82da-419ea357195a\" (UID: \"7951138f-c694-4178-82da-419ea357195a\") " Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.020922 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc" (OuterVolumeSpecName: "kube-api-access-ncbzc") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "kube-api-access-ncbzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.042469 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config" (OuterVolumeSpecName: "config") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.071134 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.099006 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.099052 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncbzc\" (UniqueName: \"kubernetes.io/projected/7951138f-c694-4178-82da-419ea357195a-kube-api-access-ncbzc\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.101315 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.117818 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.118049 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.119707 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7951138f-c694-4178-82da-419ea357195a" (UID: "7951138f-c694-4178-82da-419ea357195a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.203838 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.204096 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.204108 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.204121 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7951138f-c694-4178-82da-419ea357195a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.315539 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.468113 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerStarted","Data":"a567cbf60e1fd4b8a4c3ebd3638f1ffd0ec5f5f3556bbb6c496720e9592ee96c"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.489095 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fv6wn" event={"ID":"e30967f0-d295-4017-a586-5b1afdbcd625","Type":"ContainerStarted","Data":"08c1c908848efb6fe54928f091e9fc6bc99f387b7e4f214ed95ed5e31db1b743"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.503239 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xhf2r" event={"ID":"97a5a56e-9f78-4cc6-9299-ebe193cad354","Type":"ContainerStarted","Data":"2e6951f8f6173c343554925b13d9de350ee0076c899ed6c48b028dc4960a1a69"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.516838 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-fv6wn" podStartSLOduration=3.516820945 podStartE2EDuration="3.516820945s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:46.51181218 +0000 UTC m=+1235.202047090" watchObservedRunningTime="2025-11-26 22:41:46.516820945 +0000 UTC m=+1235.207055855" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.520992 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-w6n8h" event={"ID":"f42951d5-40b8-4f39-8a87-5f7e5809bf87","Type":"ContainerStarted","Data":"a08835c0cb3c4a4c7fba385f1f459f9f7aceddc8a3359675b078fecfb68e03db"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.530484 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m4wm4" event={"ID":"325e2202-624d-4202-aa20-bd6007c642b5","Type":"ContainerStarted","Data":"df67350bf4d64374226400873f1e620459360fb823347769be1a77f45e46c776"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.530552 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m4wm4" 
event={"ID":"325e2202-624d-4202-aa20-bd6007c642b5","Type":"ContainerStarted","Data":"6da7de9e06c70964c0884bb7d79b3efc78a3b99dde82386d2e4ea9af36b7747b"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.538968 4903 generic.go:334] "Generic (PLEG): container finished" podID="01971aea-a19a-4c71-a893-b6ee8277160d" containerID="5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d" exitCode=0 Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.539292 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" event={"ID":"01971aea-a19a-4c71-a893-b6ee8277160d","Type":"ContainerDied","Data":"5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.539322 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" event={"ID":"01971aea-a19a-4c71-a893-b6ee8277160d","Type":"ContainerStarted","Data":"eb43f1c16c353287d45d2e83ce3ae153756967cc68b530972ae006cf7b1705b3"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.554311 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fxksz" event={"ID":"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c","Type":"ContainerStarted","Data":"ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.558238 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" event={"ID":"7951138f-c694-4178-82da-419ea357195a","Type":"ContainerDied","Data":"841a7a2e36feed9916accda54afc08bcf510f1632066fc3831f3bb5e4657b5f4"} Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.558279 4903 scope.go:117] "RemoveContainer" containerID="947d7382f9492da8be10b127f673de6951a31176452832aa48bfa3410234db7d" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.558405 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-kkpg2" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.566905 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-m4wm4" podStartSLOduration=3.566886635 podStartE2EDuration="3.566886635s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:46.559074926 +0000 UTC m=+1235.249309836" watchObservedRunningTime="2025-11-26 22:41:46.566886635 +0000 UTC m=+1235.257121545" Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.740600 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:46 crc kubenswrapper[4903]: I1126 22:41:46.762926 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-kkpg2"] Nov 26 22:41:47 crc kubenswrapper[4903]: I1126 22:41:47.580238 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" event={"ID":"01971aea-a19a-4c71-a893-b6ee8277160d","Type":"ContainerStarted","Data":"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c"} Nov 26 22:41:47 crc kubenswrapper[4903]: I1126 22:41:47.580500 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:47 crc kubenswrapper[4903]: I1126 22:41:47.601291 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:47 crc kubenswrapper[4903]: I1126 22:41:47.602960 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" podStartSLOduration=4.602939589 podStartE2EDuration="4.602939589s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:41:47.599063965 +0000 UTC m=+1236.289298875" watchObservedRunningTime="2025-11-26 22:41:47.602939589 +0000 UTC m=+1236.293174499" Nov 26 22:41:48 crc kubenswrapper[4903]: I1126 22:41:48.042525 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7951138f-c694-4178-82da-419ea357195a" path="/var/lib/kubelet/pods/7951138f-c694-4178-82da-419ea357195a/volumes" Nov 26 22:41:50 crc kubenswrapper[4903]: I1126 22:41:50.613399 4903 generic.go:334] "Generic (PLEG): container finished" podID="325e2202-624d-4202-aa20-bd6007c642b5" containerID="df67350bf4d64374226400873f1e620459360fb823347769be1a77f45e46c776" exitCode=0 Nov 26 22:41:50 crc kubenswrapper[4903]: I1126 22:41:50.614015 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m4wm4" event={"ID":"325e2202-624d-4202-aa20-bd6007c642b5","Type":"ContainerDied","Data":"df67350bf4d64374226400873f1e620459360fb823347769be1a77f45e46c776"} Nov 26 22:41:52 crc kubenswrapper[4903]: I1126 22:41:52.637661 4903 generic.go:334] "Generic (PLEG): container finished" podID="2388445a-1656-41aa-8daa-a120993c24ad" containerID="1a4c90f3dfd5e0cc46c98e62069ed051a120dbf3962c91a1f4f11c59a33a62cf" exitCode=0 Nov 26 22:41:52 crc kubenswrapper[4903]: I1126 22:41:52.637767 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tz77l" 
event={"ID":"2388445a-1656-41aa-8daa-a120993c24ad","Type":"ContainerDied","Data":"1a4c90f3dfd5e0cc46c98e62069ed051a120dbf3962c91a1f4f11c59a33a62cf"} Nov 26 22:41:54 crc kubenswrapper[4903]: I1126 22:41:54.374924 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:41:54 crc kubenswrapper[4903]: I1126 22:41:54.441885 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:41:54 crc kubenswrapper[4903]: I1126 22:41:54.442113 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" containerID="cri-o://5348c272d0973eb267c4c74082e4d4fcada426624ae66fb34261132c7aeb3ec3" gracePeriod=10 Nov 26 22:41:54 crc kubenswrapper[4903]: I1126 22:41:54.660208 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerID="5348c272d0973eb267c4c74082e4d4fcada426624ae66fb34261132c7aeb3ec3" exitCode=0 Nov 26 22:41:54 crc kubenswrapper[4903]: I1126 22:41:54.660263 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" event={"ID":"f4d67314-0052-4f9e-9e9a-76f829dea702","Type":"ContainerDied","Data":"5348c272d0973eb267c4c74082e4d4fcada426624ae66fb34261132c7aeb3ec3"} Nov 26 22:41:55 crc kubenswrapper[4903]: I1126 22:41:55.859762 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.172:5353: connect: connection refused" Nov 26 22:41:57 crc kubenswrapper[4903]: I1126 22:41:57.619512 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:57 crc kubenswrapper[4903]: I1126 22:41:57.630513 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:57 crc kubenswrapper[4903]: I1126 22:41:57.716406 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.133776 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233378 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233663 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233780 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233814 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233884 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.233936 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw6nm\" (UniqueName: \"kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm\") pod \"325e2202-624d-4202-aa20-bd6007c642b5\" (UID: \"325e2202-624d-4202-aa20-bd6007c642b5\") " Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.240817 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.240977 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm" (OuterVolumeSpecName: "kube-api-access-zw6nm") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "kube-api-access-zw6nm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.243458 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts" (OuterVolumeSpecName: "scripts") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.280594 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.292955 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data" (OuterVolumeSpecName: "config-data") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.296186 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "325e2202-624d-4202-aa20-bd6007c642b5" (UID: "325e2202-624d-4202-aa20-bd6007c642b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336648 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336675 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336684 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336749 4903 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336758 4903 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/325e2202-624d-4202-aa20-bd6007c642b5-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.336766 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw6nm\" (UniqueName: \"kubernetes.io/projected/325e2202-624d-4202-aa20-bd6007c642b5-kube-api-access-zw6nm\") on node \"crc\" DevicePath \"\"" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.740110 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m4wm4" Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.744213 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m4wm4" event={"ID":"325e2202-624d-4202-aa20-bd6007c642b5","Type":"ContainerDied","Data":"6da7de9e06c70964c0884bb7d79b3efc78a3b99dde82386d2e4ea9af36b7747b"} Nov 26 22:41:58 crc kubenswrapper[4903]: I1126 22:41:58.744265 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6da7de9e06c70964c0884bb7d79b3efc78a3b99dde82386d2e4ea9af36b7747b" Nov 26 22:41:58 crc kubenswrapper[4903]: E1126 22:41:58.833657 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 26 22:41:58 crc kubenswrapper[4903]: E1126 22:41:58.833842 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdx64,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-w6n8h_openstack(f42951d5-40b8-4f39-8a87-5f7e5809bf87): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:41:58 crc kubenswrapper[4903]: E1126 22:41:58.835026 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-w6n8h" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.221334 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-m4wm4"] Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.231661 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-m4wm4"] Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 
22:41:59.334951 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lhsbl"]
Nov 26 22:41:59 crc kubenswrapper[4903]: E1126 22:41:59.335723 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7951138f-c694-4178-82da-419ea357195a" containerName="init"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.335754 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7951138f-c694-4178-82da-419ea357195a" containerName="init"
Nov 26 22:41:59 crc kubenswrapper[4903]: E1126 22:41:59.335781 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="325e2202-624d-4202-aa20-bd6007c642b5" containerName="keystone-bootstrap"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.335797 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="325e2202-624d-4202-aa20-bd6007c642b5" containerName="keystone-bootstrap"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.336240 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7951138f-c694-4178-82da-419ea357195a" containerName="init"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.336272 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="325e2202-624d-4202-aa20-bd6007c642b5" containerName="keystone-bootstrap"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.337588 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.341530 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.341803 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.341977 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.342218 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.343375 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pdkfz"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.343455 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lhsbl"]
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.461848 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.461893 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.461924 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.462024 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5rgq\" (UniqueName: \"kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.462112 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.462167 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.563963 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.564039 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.564096 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.564222 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5rgq\" (UniqueName: \"kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.564302 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.564535 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.572122 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.572385 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.572476 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.572485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.573216 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.583404 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5rgq\" (UniqueName: \"kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq\") pod \"keystone-bootstrap-lhsbl\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " pod="openstack/keystone-bootstrap-lhsbl"
Nov 26 22:41:59 crc kubenswrapper[4903]: I1126 22:41:59.671681 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lhsbl" Nov 26 22:41:59 crc kubenswrapper[4903]: E1126 22:41:59.750527 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-w6n8h" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.048232 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="325e2202-624d-4202-aa20-bd6007c642b5" path="/var/lib/kubelet/pods/325e2202-624d-4202-aa20-bd6007c642b5/volumes" Nov 26 22:42:00 crc kubenswrapper[4903]: E1126 22:42:00.699320 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 26 22:42:00 crc kubenswrapper[4903]: E1126 22:42:00.699493 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5cbh6dh56bh585h59dh66dh75hc6h698h66fh56ch684h686h576h68hc7h5bh58bhbch557h569h675h5fbhf5hf5hcfhc7h696h68fh5dch5fchb4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-84g89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(e3b2305d-da0f-4efb-9ac8-3df8527f9dec): ErrImagePull: 
rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.733456 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tz77l" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.765188 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tz77l" event={"ID":"2388445a-1656-41aa-8daa-a120993c24ad","Type":"ContainerDied","Data":"7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931"} Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.765226 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b9fc8812304e421452e677cfcad31195cd15ae6a2c8e9db8349f42180da3931" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.765247 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tz77l" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.789578 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle\") pod \"2388445a-1656-41aa-8daa-a120993c24ad\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.789796 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data\") pod \"2388445a-1656-41aa-8daa-a120993c24ad\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.789910 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data\") pod \"2388445a-1656-41aa-8daa-a120993c24ad\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.790003 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74hsf\" (UniqueName: \"kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf\") pod \"2388445a-1656-41aa-8daa-a120993c24ad\" (UID: \"2388445a-1656-41aa-8daa-a120993c24ad\") " Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.798923 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2388445a-1656-41aa-8daa-a120993c24ad" (UID: "2388445a-1656-41aa-8daa-a120993c24ad"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.805881 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf" (OuterVolumeSpecName: "kube-api-access-74hsf") pod "2388445a-1656-41aa-8daa-a120993c24ad" (UID: "2388445a-1656-41aa-8daa-a120993c24ad"). InnerVolumeSpecName "kube-api-access-74hsf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.827710 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2388445a-1656-41aa-8daa-a120993c24ad" (UID: "2388445a-1656-41aa-8daa-a120993c24ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.866415 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data" (OuterVolumeSpecName: "config-data") pod "2388445a-1656-41aa-8daa-a120993c24ad" (UID: "2388445a-1656-41aa-8daa-a120993c24ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.893285 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.893327 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74hsf\" (UniqueName: \"kubernetes.io/projected/2388445a-1656-41aa-8daa-a120993c24ad-kube-api-access-74hsf\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.893340 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:00 crc kubenswrapper[4903]: I1126 22:42:00.893351 4903 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2388445a-1656-41aa-8daa-a120993c24ad-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:01 crc kubenswrapper[4903]: I1126 22:42:01.980926 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:42:01 crc kubenswrapper[4903]: I1126 22:42:01.981006 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.147929 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:02 crc kubenswrapper[4903]: E1126 22:42:02.148443 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2388445a-1656-41aa-8daa-a120993c24ad" containerName="glance-db-sync" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.148455 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2388445a-1656-41aa-8daa-a120993c24ad" containerName="glance-db-sync" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.148670 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2388445a-1656-41aa-8daa-a120993c24ad" containerName="glance-db-sync" Nov 26 22:42:02 crc 
kubenswrapper[4903]: I1126 22:42:02.151798 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.160237 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.222991 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.223277 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.223343 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.223376 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.223408 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm4ss\" (UniqueName: \"kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.223440 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.326725 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm4ss\" (UniqueName: \"kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.326823 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " 
pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.326947 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.326972 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.327069 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.327130 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.327823 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.328351 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.328854 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.328965 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.335890 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.346417 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm4ss\" (UniqueName: \"kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss\") pod \"dnsmasq-dns-785d8bcb8c-lv8x6\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.506162 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.797137 4903 generic.go:334] "Generic (PLEG): container finished" podID="e30967f0-d295-4017-a586-5b1afdbcd625" containerID="08c1c908848efb6fe54928f091e9fc6bc99f387b7e4f214ed95ed5e31db1b743" exitCode=0 Nov 26 22:42:02 crc kubenswrapper[4903]: I1126 22:42:02.797181 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fv6wn" event={"ID":"e30967f0-d295-4017-a586-5b1afdbcd625","Type":"ContainerDied","Data":"08c1c908848efb6fe54928f091e9fc6bc99f387b7e4f214ed95ed5e31db1b743"} Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.078125 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.081614 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.085628 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.085803 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-m2kjp" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.087895 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.102356 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155268 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155562 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155645 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155734 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d64dv\" (UniqueName: 
\"kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155856 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.155944 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.156049 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.257751 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.257854 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.257912 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.257934 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.257956 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d64dv\" (UniqueName: \"kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.258012 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.258046 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.258468 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.258509 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.262676 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.263204 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.264132 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.271203 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.274373 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d64dv\" (UniqueName: \"kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.296101 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.301155 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.307376 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.314811 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.316841 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.359871 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.359911 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.359932 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.359961 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.359982 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.360003 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.360077 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phh5w\" (UniqueName: \"kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.426348 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.462084 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phh5w\" (UniqueName: \"kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.462680 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.462912 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463438 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463554 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463641 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463763 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.462874 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463911 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.463967 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.470619 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.489995 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phh5w\" (UniqueName: \"kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.490079 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.492193 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.536406 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:03 crc kubenswrapper[4903]: I1126 22:42:03.719643 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:04 crc kubenswrapper[4903]: I1126 22:42:04.894788 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:05 crc kubenswrapper[4903]: I1126 22:42:05.019573 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:05 crc kubenswrapper[4903]: I1126 22:42:05.859558 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.172:5353: i/o timeout" Nov 26 22:42:10 crc kubenswrapper[4903]: I1126 22:42:10.860128 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.172:5353: i/o timeout" Nov 26 22:42:10 crc kubenswrapper[4903]: I1126 22:42:10.861433 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.091266 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.103235 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253397 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle\") pod \"e30967f0-d295-4017-a586-5b1afdbcd625\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253465 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config\") pod \"e30967f0-d295-4017-a586-5b1afdbcd625\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253514 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253537 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z295k\" (UniqueName: \"kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253596 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253611 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253710 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253738 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzhzr\" (UniqueName: \"kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr\") pod \"e30967f0-d295-4017-a586-5b1afdbcd625\" (UID: \"e30967f0-d295-4017-a586-5b1afdbcd625\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.253804 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0\") pod \"f4d67314-0052-4f9e-9e9a-76f829dea702\" (UID: \"f4d67314-0052-4f9e-9e9a-76f829dea702\") " Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.260076 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr" (OuterVolumeSpecName: "kube-api-access-qzhzr") pod "e30967f0-d295-4017-a586-5b1afdbcd625" (UID: "e30967f0-d295-4017-a586-5b1afdbcd625"). InnerVolumeSpecName "kube-api-access-qzhzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.260204 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k" (OuterVolumeSpecName: "kube-api-access-z295k") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "kube-api-access-z295k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.290395 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e30967f0-d295-4017-a586-5b1afdbcd625" (UID: "e30967f0-d295-4017-a586-5b1afdbcd625"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.304791 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.318573 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.323575 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config" (OuterVolumeSpecName: "config") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.326163 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config" (OuterVolumeSpecName: "config") pod "e30967f0-d295-4017-a586-5b1afdbcd625" (UID: "e30967f0-d295-4017-a586-5b1afdbcd625"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.329653 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.333575 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f4d67314-0052-4f9e-9e9a-76f829dea702" (UID: "f4d67314-0052-4f9e-9e9a-76f829dea702"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356445 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356470 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356480 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356490 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzhzr\" (UniqueName: \"kubernetes.io/projected/e30967f0-d295-4017-a586-5b1afdbcd625-kube-api-access-qzhzr\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356501 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356509 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356518 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/e30967f0-d295-4017-a586-5b1afdbcd625-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356527 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4d67314-0052-4f9e-9e9a-76f829dea702-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.356536 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z295k\" (UniqueName: \"kubernetes.io/projected/f4d67314-0052-4f9e-9e9a-76f829dea702-kube-api-access-z295k\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.917320 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" event={"ID":"f4d67314-0052-4f9e-9e9a-76f829dea702","Type":"ContainerDied","Data":"e39ece1fc22b955fceab59fc67203814178ba68ec45e8adc037bc16635bf0d4c"} Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.917396 4903 scope.go:117] "RemoveContainer" containerID="5348c272d0973eb267c4c74082e4d4fcada426624ae66fb34261132c7aeb3ec3" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.917573 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.925813 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-fv6wn" event={"ID":"e30967f0-d295-4017-a586-5b1afdbcd625","Type":"ContainerDied","Data":"82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a"} Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.925855 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82cd0f839132ee3c57e2a1ebb7218094461e7e1ef4e959d731188f2efa02cd1a" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.925930 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-fv6wn" Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.971926 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:42:11 crc kubenswrapper[4903]: I1126 22:42:11.984333 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-f9gqr"] Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.049742 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" path="/var/lib/kubelet/pods/f4d67314-0052-4f9e-9e9a-76f829dea702/volumes" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.310567 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.400874 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.401058 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-26f6j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},
RestartPolicy:nil,} start failed in pod cinder-db-sync-fxksz_openstack(d0b2d5fd-9425-4082-b1cc-3ce796c82e0c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.401361 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.402612 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e30967f0-d295-4017-a586-5b1afdbcd625" containerName="neutron-db-sync" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.402625 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e30967f0-d295-4017-a586-5b1afdbcd625" containerName="neutron-db-sync" Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.402658 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="init" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.402664 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="init" Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.402671 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.402678 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.403620 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e30967f0-d295-4017-a586-5b1afdbcd625" containerName="neutron-db-sync" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.403650 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.409269 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-fxksz" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.459591 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.459629 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.459746 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.491032 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.492174 4903 scope.go:117] "RemoveContainer" containerID="9661d7c6349fc333fc84235b786844dba28b19d35dcf6a92fcc9cedc6523d376" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.494842 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.495017 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-lzclj" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.495050 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.495136 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.497559 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631353 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631574 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631611 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631635 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631677 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631706 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.631983 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwc8l\" (UniqueName: \"kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.632007 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8pzp\" (UniqueName: \"kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.632051 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.632123 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.632164 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733342 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733389 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733424 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733447 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733483 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733510 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733556 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733576 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733603 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwc8l\" (UniqueName: \"kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733619 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8pzp\" (UniqueName: \"kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.733648 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.734485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.735395 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.735442 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.735607 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.736118 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.739295 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.739794 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.740144 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.742481 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.750102 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8pzp\" (UniqueName: \"kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp\") pod \"neutron-67dffd5468-nzpt8\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.750459 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwc8l\" (UniqueName: \"kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l\") pod \"dnsmasq-dns-55f844cf75-qttx2\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.933954 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.957789 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:12 crc kubenswrapper[4903]: E1126 22:42:12.971933 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-fxksz" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" Nov 26 22:42:12 crc kubenswrapper[4903]: I1126 22:42:12.985261 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lhsbl"] Nov 26 22:42:13 crc kubenswrapper[4903]: I1126 22:42:13.224615 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:13 crc kubenswrapper[4903]: W1126 22:42:13.294985 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb33bddbf_64ec_40c6_a7ea_5919c5a1042d.slice/crio-4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5 WatchSource:0}: Error finding container 4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5: Status 404 returned error can't find the container with id 4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5 Nov 26 22:42:13 crc kubenswrapper[4903]: W1126 22:42:13.332187 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40ee578a_9b83_43a6_a627_550a1f6fa958.slice/crio-e555a03260bed86fa64a6f27e603cc26efa8e661f5075ee719283a49f9008400 WatchSource:0}: Error finding container e555a03260bed86fa64a6f27e603cc26efa8e661f5075ee719283a49f9008400: Status 404 returned error can't find the container with id e555a03260bed86fa64a6f27e603cc26efa8e661f5075ee719283a49f9008400 Nov 26 22:42:13 crc kubenswrapper[4903]: I1126 22:42:13.487062 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 22:42:13 crc kubenswrapper[4903]: I1126 22:42:13.861429 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:13 crc kubenswrapper[4903]: I1126 22:42:13.991801 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.003904 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xhf2r" event={"ID":"97a5a56e-9f78-4cc6-9299-ebe193cad354","Type":"ContainerStarted","Data":"bdc114f58a702c71b3544ee1958a7f89c3282e660dae2d4d6c780d8d221c7910"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.010083 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerStarted","Data":"10aceb4ed8e2259b9ef229568a917000f8867d70c6b65ddff2bd8c03446f5417"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.018298 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerStarted","Data":"e555a03260bed86fa64a6f27e603cc26efa8e661f5075ee719283a49f9008400"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.020771 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhsbl" 
event={"ID":"b33bddbf-64ec-40c6-a7ea-5919c5a1042d","Type":"ContainerStarted","Data":"d8426fe1ef5abc5d02e44965f680d7bcd077e502b7c2edcc8271d9904bbdbf66"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.020796 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhsbl" event={"ID":"b33bddbf-64ec-40c6-a7ea-5919c5a1042d","Type":"ContainerStarted","Data":"4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.028255 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-xhf2r" podStartSLOduration=4.123989598 podStartE2EDuration="31.028236741s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="2025-11-26 22:41:45.444943972 +0000 UTC m=+1234.135178882" lastFinishedPulling="2025-11-26 22:42:12.349191125 +0000 UTC m=+1261.039426025" observedRunningTime="2025-11-26 22:42:14.02185747 +0000 UTC m=+1262.712092380" watchObservedRunningTime="2025-11-26 22:42:14.028236741 +0000 UTC m=+1262.718471651" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.040555 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-dz7dx" event={"ID":"8a96189f-52eb-44aa-8638-96d516cd0eb3","Type":"ContainerStarted","Data":"d2bdfa7466b3f339611a3f8ba8cbb5f2122fb16dfe6599730212c78f3e1eae51"} Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.044191 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lhsbl" podStartSLOduration=15.044180078 podStartE2EDuration="15.044180078s" podCreationTimestamp="2025-11-26 22:41:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:14.04238087 +0000 UTC m=+1262.732615780" watchObservedRunningTime="2025-11-26 22:42:14.044180078 +0000 UTC m=+1262.734414988" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.057593 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-dz7dx" podStartSLOduration=3.51664999 podStartE2EDuration="31.057575277s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="2025-11-26 22:41:44.908950424 +0000 UTC m=+1233.599185334" lastFinishedPulling="2025-11-26 22:42:12.449875721 +0000 UTC m=+1261.140110621" observedRunningTime="2025-11-26 22:42:14.057114655 +0000 UTC m=+1262.747349565" watchObservedRunningTime="2025-11-26 22:42:14.057575277 +0000 UTC m=+1262.747810187" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.144002 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.364929 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:14 crc kubenswrapper[4903]: W1126 22:42:14.392373 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod892b1cdc_def0_4620_b3b6_d9cc248b33bb.slice/crio-6d8d79495e272f8ebb018a4f058c8c28e7c27632d42d81d8c5a1a8058066cc64 WatchSource:0}: Error finding container 6d8d79495e272f8ebb018a4f058c8c28e7c27632d42d81d8c5a1a8058066cc64: Status 404 returned error can't find the container with id 6d8d79495e272f8ebb018a4f058c8c28e7c27632d42d81d8c5a1a8058066cc64 Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.636710 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/neutron-5bbd968879-hmnnt"] Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.654557 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.656989 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.657047 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.735814 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5bbd968879-hmnnt"] Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801345 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-public-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801396 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-httpd-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801415 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-combined-ca-bundle\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801650 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwmk6\" (UniqueName: \"kubernetes.io/projected/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-kube-api-access-zwmk6\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801745 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-ovndb-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801804 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.801909 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-internal-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc 
kubenswrapper[4903]: I1126 22:42:14.903861 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-internal-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.904346 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-public-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.905391 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-httpd-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.906101 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-combined-ca-bundle\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.906286 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwmk6\" (UniqueName: \"kubernetes.io/projected/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-kube-api-access-zwmk6\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.906355 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-ovndb-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.906388 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.913276 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-internal-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.913860 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-ovndb-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.915893 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-public-tls-certs\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.916208 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-httpd-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.922656 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-combined-ca-bundle\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.938025 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-config\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:14 crc kubenswrapper[4903]: I1126 22:42:14.953397 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwmk6\" (UniqueName: \"kubernetes.io/projected/710e7305-de14-46ea-8cc9-1cbc9dcf0a44-kube-api-access-zwmk6\") pod \"neutron-5bbd968879-hmnnt\" (UID: \"710e7305-de14-46ea-8cc9-1cbc9dcf0a44\") " pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.049095 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.053072 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" event={"ID":"63158eb2-a413-4b83-9218-cabc20543498","Type":"ContainerStarted","Data":"d742e80d747139bb306c5baad19e65ac20ec395e08323f0cedc5e985b3f66f49"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.054734 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-w6n8h" event={"ID":"f42951d5-40b8-4f39-8a87-5f7e5809bf87","Type":"ContainerStarted","Data":"c5590704da57a0e07ad6a74affef23064afbed74c71edf63ef9bf49c5c1e04e2"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.058135 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerStarted","Data":"6d8d79495e272f8ebb018a4f058c8c28e7c27632d42d81d8c5a1a8058066cc64"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.066107 4903 generic.go:334] "Generic (PLEG): container finished" podID="ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" containerID="3ec88e4305bb37bdc325d52853374b9e7f6f44dff69ac555676e0da0317a62fd" exitCode=0 Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.066185 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" event={"ID":"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5","Type":"ContainerDied","Data":"3ec88e4305bb37bdc325d52853374b9e7f6f44dff69ac555676e0da0317a62fd"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.066219 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" event={"ID":"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5","Type":"ContainerStarted","Data":"9d3988b3d02e9f5d25eecd09b4cf52a982cb63e0507a5c275107fc5a860a644c"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.070876 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerStarted","Data":"80fae27da19b974cb9e0b08887be36e3c9e401e504cd21caf4c2b5afbf2df925"} Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.101801 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-w6n8h" podStartSLOduration=3.897128695 podStartE2EDuration="32.101777779s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="2025-11-26 22:41:45.615315212 +0000 UTC m=+1234.305550122" lastFinishedPulling="2025-11-26 22:42:13.819964296 +0000 UTC m=+1262.510199206" observedRunningTime="2025-11-26 22:42:15.075041993 +0000 UTC m=+1263.765276903" watchObservedRunningTime="2025-11-26 22:42:15.101777779 +0000 UTC m=+1263.792012689" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.560064 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:15 crc kubenswrapper[4903]: W1126 22:42:15.730021 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod710e7305_de14_46ea_8cc9_1cbc9dcf0a44.slice/crio-50dcb9f3b88d84ed49a64c00a4f5adc224a668c0331a6812f433015951252c72 WatchSource:0}: Error finding container 50dcb9f3b88d84ed49a64c00a4f5adc224a668c0331a6812f433015951252c72: Status 404 returned error can't find the container with id 50dcb9f3b88d84ed49a64c00a4f5adc224a668c0331a6812f433015951252c72 Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734074 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm4ss\" (UniqueName: \"kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734173 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734220 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734284 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734315 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.734408 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config\") pod \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\" (UID: \"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5\") " Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.748336 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss" (OuterVolumeSpecName: "kube-api-access-sm4ss") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "kube-api-access-sm4ss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.760373 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5bbd968879-hmnnt"] Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.768776 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config" (OuterVolumeSpecName: "config") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.777751 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.791348 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.800579 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.811553 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" (UID: "ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838610 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838648 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm4ss\" (UniqueName: \"kubernetes.io/projected/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-kube-api-access-sm4ss\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838662 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838678 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838705 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.838717 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:15 crc kubenswrapper[4903]: I1126 22:42:15.862333 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-f9gqr" podUID="f4d67314-0052-4f9e-9e9a-76f829dea702" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.172:5353: i/o timeout" Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.085580 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bbd968879-hmnnt" event={"ID":"710e7305-de14-46ea-8cc9-1cbc9dcf0a44","Type":"ContainerStarted","Data":"50dcb9f3b88d84ed49a64c00a4f5adc224a668c0331a6812f433015951252c72"} Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.089624 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" event={"ID":"ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5","Type":"ContainerDied","Data":"9d3988b3d02e9f5d25eecd09b4cf52a982cb63e0507a5c275107fc5a860a644c"} Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.089770 4903 scope.go:117] "RemoveContainer" containerID="3ec88e4305bb37bdc325d52853374b9e7f6f44dff69ac555676e0da0317a62fd" Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.089720 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-lv8x6" Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.141053 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:16 crc kubenswrapper[4903]: I1126 22:42:16.149227 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-lv8x6"] Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.048486 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" path="/var/lib/kubelet/pods/ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5/volumes" Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.130600 4903 generic.go:334] "Generic (PLEG): container finished" podID="63158eb2-a413-4b83-9218-cabc20543498" containerID="3f73b9d94076a8779d914ab901b72ba0e9e73767117fd6bd482afc5366bb7101" exitCode=0 Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.130681 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" event={"ID":"63158eb2-a413-4b83-9218-cabc20543498","Type":"ContainerDied","Data":"3f73b9d94076a8779d914ab901b72ba0e9e73767117fd6bd482afc5366bb7101"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.134842 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bbd968879-hmnnt" event={"ID":"710e7305-de14-46ea-8cc9-1cbc9dcf0a44","Type":"ContainerStarted","Data":"47a2cb2de37b4b27e15268b4e8bd9d8f3b89129de51fbc814f485e4103782195"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.134868 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5bbd968879-hmnnt" event={"ID":"710e7305-de14-46ea-8cc9-1cbc9dcf0a44","Type":"ContainerStarted","Data":"b776434a1ed3d9e876709a03c3402f00db2483a2de4f729c48b41e3efd113a2b"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.135413 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.137726 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerStarted","Data":"1e234fbb81e8fdfd2f422f70e8ea8a8560118a0a403f5e8261ca3ebcf00e82c5"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.137784 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerStarted","Data":"ee374de47b2bbe4c5585c033c9bc204aeb8bfb52fc46b943a7e85f4ceb655027"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.137972 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.139682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerStarted","Data":"815cb31ccc71c0f5bf6c756f7f032647662cd9ec3d847d5e988f99acc6f0d477"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.145265 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerStarted","Data":"615980318c4da046c67fafeca69f12314f5f0d861e3c29ee387b126da1c4b631"} Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.168976 4903 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/neutron-5bbd968879-hmnnt" podStartSLOduration=4.168962193 podStartE2EDuration="4.168962193s" podCreationTimestamp="2025-11-26 22:42:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:18.166858597 +0000 UTC m=+1266.857093497" watchObservedRunningTime="2025-11-26 22:42:18.168962193 +0000 UTC m=+1266.859197103" Nov 26 22:42:18 crc kubenswrapper[4903]: I1126 22:42:18.191480 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-67dffd5468-nzpt8" podStartSLOduration=6.191464655 podStartE2EDuration="6.191464655s" podCreationTimestamp="2025-11-26 22:42:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:18.188342502 +0000 UTC m=+1266.878577412" watchObservedRunningTime="2025-11-26 22:42:18.191464655 +0000 UTC m=+1266.881699565" Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.157847 4903 generic.go:334] "Generic (PLEG): container finished" podID="b33bddbf-64ec-40c6-a7ea-5919c5a1042d" containerID="d8426fe1ef5abc5d02e44965f680d7bcd077e502b7c2edcc8271d9904bbdbf66" exitCode=0 Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.158349 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhsbl" event={"ID":"b33bddbf-64ec-40c6-a7ea-5919c5a1042d","Type":"ContainerDied","Data":"d8426fe1ef5abc5d02e44965f680d7bcd077e502b7c2edcc8271d9904bbdbf66"} Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.161515 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerStarted","Data":"ec4c452e74e3df9cf0bc6186413c8c13d2158912e0fc840fc06d34b1a2813d41"} Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.161541 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-log" containerID="cri-o://815cb31ccc71c0f5bf6c756f7f032647662cd9ec3d847d5e988f99acc6f0d477" gracePeriod=30 Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.161583 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-httpd" containerID="cri-o://ec4c452e74e3df9cf0bc6186413c8c13d2158912e0fc840fc06d34b1a2813d41" gracePeriod=30 Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.163598 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerStarted","Data":"d5f23d77ea87c9b1e52394484cd367d39e3eba9c5e902e39b98294e5f2d0ec79"} Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.163793 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-httpd" containerID="cri-o://d5f23d77ea87c9b1e52394484cd367d39e3eba9c5e902e39b98294e5f2d0ec79" gracePeriod=30 Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.163786 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-log" 
containerID="cri-o://615980318c4da046c67fafeca69f12314f5f0d861e3c29ee387b126da1c4b631" gracePeriod=30 Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.170260 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" event={"ID":"63158eb2-a413-4b83-9218-cabc20543498","Type":"ContainerStarted","Data":"e3933e4fde501795d0d907b563137866190add24b8b61e29f3e2e902b91f03b7"} Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.170411 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.201939 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" podStartSLOduration=7.201921574 podStartE2EDuration="7.201921574s" podCreationTimestamp="2025-11-26 22:42:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:19.20102483 +0000 UTC m=+1267.891259740" watchObservedRunningTime="2025-11-26 22:42:19.201921574 +0000 UTC m=+1267.892156484" Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.235963 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=17.235944075 podStartE2EDuration="17.235944075s" podCreationTimestamp="2025-11-26 22:42:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:19.228072995 +0000 UTC m=+1267.918307895" watchObservedRunningTime="2025-11-26 22:42:19.235944075 +0000 UTC m=+1267.926178985" Nov 26 22:42:19 crc kubenswrapper[4903]: I1126 22:42:19.262683 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.26266179 podStartE2EDuration="17.26266179s" podCreationTimestamp="2025-11-26 22:42:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:19.25557215 +0000 UTC m=+1267.945807080" watchObservedRunningTime="2025-11-26 22:42:19.26266179 +0000 UTC m=+1267.952896700" Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.190939 4903 generic.go:334] "Generic (PLEG): container finished" podID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerID="d5f23d77ea87c9b1e52394484cd367d39e3eba9c5e902e39b98294e5f2d0ec79" exitCode=0 Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.191276 4903 generic.go:334] "Generic (PLEG): container finished" podID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerID="615980318c4da046c67fafeca69f12314f5f0d861e3c29ee387b126da1c4b631" exitCode=143 Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.191373 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerDied","Data":"d5f23d77ea87c9b1e52394484cd367d39e3eba9c5e902e39b98294e5f2d0ec79"} Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.191417 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerDied","Data":"615980318c4da046c67fafeca69f12314f5f0d861e3c29ee387b126da1c4b631"} Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.193711 4903 generic.go:334] "Generic (PLEG): 
container finished" podID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerID="ec4c452e74e3df9cf0bc6186413c8c13d2158912e0fc840fc06d34b1a2813d41" exitCode=0 Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.193732 4903 generic.go:334] "Generic (PLEG): container finished" podID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerID="815cb31ccc71c0f5bf6c756f7f032647662cd9ec3d847d5e988f99acc6f0d477" exitCode=143 Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.194809 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerDied","Data":"ec4c452e74e3df9cf0bc6186413c8c13d2158912e0fc840fc06d34b1a2813d41"} Nov 26 22:42:20 crc kubenswrapper[4903]: I1126 22:42:20.194834 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerDied","Data":"815cb31ccc71c0f5bf6c756f7f032647662cd9ec3d847d5e988f99acc6f0d477"} Nov 26 22:42:21 crc kubenswrapper[4903]: I1126 22:42:21.206741 4903 generic.go:334] "Generic (PLEG): container finished" podID="97a5a56e-9f78-4cc6-9299-ebe193cad354" containerID="bdc114f58a702c71b3544ee1958a7f89c3282e660dae2d4d6c780d8d221c7910" exitCode=0 Nov 26 22:42:21 crc kubenswrapper[4903]: I1126 22:42:21.206836 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xhf2r" event={"ID":"97a5a56e-9f78-4cc6-9299-ebe193cad354","Type":"ContainerDied","Data":"bdc114f58a702c71b3544ee1958a7f89c3282e660dae2d4d6c780d8d221c7910"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.088144 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhsbl" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.090128 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-xhf2r" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.125753 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216170 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216649 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gngg6\" (UniqueName: \"kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6\") pod \"97a5a56e-9f78-4cc6-9299-ebe193cad354\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216722 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle\") pod \"97a5a56e-9f78-4cc6-9299-ebe193cad354\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216783 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216817 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216857 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts\") pod \"97a5a56e-9f78-4cc6-9299-ebe193cad354\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.216936 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217015 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217044 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phh5w\" (UniqueName: \"kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217086 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 
26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217127 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs\") pod \"97a5a56e-9f78-4cc6-9299-ebe193cad354\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217153 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217182 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217214 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217287 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data\") pod \"97a5a56e-9f78-4cc6-9299-ebe193cad354\" (UID: \"97a5a56e-9f78-4cc6-9299-ebe193cad354\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217323 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217350 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle\") pod \"40ee578a-9b83-43a6-a627-550a1f6fa958\" (UID: \"40ee578a-9b83-43a6-a627-550a1f6fa958\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.217398 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5rgq\" (UniqueName: \"kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq\") pod \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\" (UID: \"b33bddbf-64ec-40c6-a7ea-5919c5a1042d\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.221796 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs" (OuterVolumeSpecName: "logs") pod "97a5a56e-9f78-4cc6-9299-ebe193cad354" (UID: "97a5a56e-9f78-4cc6-9299-ebe193cad354"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.221903 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts" (OuterVolumeSpecName: "scripts") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.223572 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6" (OuterVolumeSpecName: "kube-api-access-gngg6") pod "97a5a56e-9f78-4cc6-9299-ebe193cad354" (UID: "97a5a56e-9f78-4cc6-9299-ebe193cad354"). InnerVolumeSpecName "kube-api-access-gngg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.223941 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs" (OuterVolumeSpecName: "logs") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.225587 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq" (OuterVolumeSpecName: "kube-api-access-m5rgq") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "kube-api-access-m5rgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.226806 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.227488 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.227846 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.232562 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts" (OuterVolumeSpecName: "scripts") pod "97a5a56e-9f78-4cc6-9299-ebe193cad354" (UID: "97a5a56e-9f78-4cc6-9299-ebe193cad354"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.235627 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"99dddd38-b1aa-49b9-82c2-4ac85598ef74","Type":"ContainerDied","Data":"10aceb4ed8e2259b9ef229568a917000f8867d70c6b65ddff2bd8c03446f5417"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.235665 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10aceb4ed8e2259b9ef229568a917000f8867d70c6b65ddff2bd8c03446f5417" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.235725 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w" (OuterVolumeSpecName: "kube-api-access-phh5w") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "kube-api-access-phh5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.251915 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts" (OuterVolumeSpecName: "scripts") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.252791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerStarted","Data":"c6fe33af25f2479439e622058688ae8cb76e78f88ed71095ccafa36430df3db2"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.260206 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xhf2r" event={"ID":"97a5a56e-9f78-4cc6-9299-ebe193cad354","Type":"ContainerDied","Data":"2e6951f8f6173c343554925b13d9de350ee0076c899ed6c48b028dc4960a1a69"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.260254 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e6951f8f6173c343554925b13d9de350ee0076c899ed6c48b028dc4960a1a69" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.260323 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-xhf2r" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.268139 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"40ee578a-9b83-43a6-a627-550a1f6fa958","Type":"ContainerDied","Data":"e555a03260bed86fa64a6f27e603cc26efa8e661f5075ee719283a49f9008400"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.268188 4903 scope.go:117] "RemoveContainer" containerID="d5f23d77ea87c9b1e52394484cd367d39e3eba9c5e902e39b98294e5f2d0ec79" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.268321 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.268616 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.270814 4903 generic.go:334] "Generic (PLEG): container finished" podID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" containerID="c5590704da57a0e07ad6a74affef23064afbed74c71edf63ef9bf49c5c1e04e2" exitCode=0 Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.270979 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-w6n8h" event={"ID":"f42951d5-40b8-4f39-8a87-5f7e5809bf87","Type":"ContainerDied","Data":"c5590704da57a0e07ad6a74affef23064afbed74c71edf63ef9bf49c5c1e04e2"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.283686 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.283806 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.288472 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhsbl" event={"ID":"b33bddbf-64ec-40c6-a7ea-5919c5a1042d","Type":"ContainerDied","Data":"4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5"} Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.288529 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhsbl" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.288552 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4848470c24040ead812a968d05b00b25b7be189a93acb516054fcf1d00d9a8a5" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.295825 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data" (OuterVolumeSpecName: "config-data") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.301536 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b33bddbf-64ec-40c6-a7ea-5919c5a1042d" (UID: "b33bddbf-64ec-40c6-a7ea-5919c5a1042d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321089 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d64dv\" (UniqueName: \"kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321252 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321321 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321342 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321400 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321425 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.321470 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data\") pod \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\" (UID: \"99dddd38-b1aa-49b9-82c2-4ac85598ef74\") " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.323962 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.323978 4903 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.323988 4903 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.323999 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phh5w\" (UniqueName: \"kubernetes.io/projected/40ee578a-9b83-43a6-a627-550a1f6fa958-kube-api-access-phh5w\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc 
kubenswrapper[4903]: I1126 22:42:23.324008 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324018 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97a5a56e-9f78-4cc6-9299-ebe193cad354-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324026 4903 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324035 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40ee578a-9b83-43a6-a627-550a1f6fa958-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324042 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324050 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324058 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5rgq\" (UniqueName: \"kubernetes.io/projected/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-kube-api-access-m5rgq\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324077 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324086 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gngg6\" (UniqueName: \"kubernetes.io/projected/97a5a56e-9f78-4cc6-9299-ebe193cad354-kube-api-access-gngg6\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324094 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.324102 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b33bddbf-64ec-40c6-a7ea-5919c5a1042d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.329135 4903 scope.go:117] "RemoveContainer" containerID="615980318c4da046c67fafeca69f12314f5f0d861e3c29ee387b126da1c4b631" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.332206 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs" (OuterVolumeSpecName: "logs") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.334319 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.335435 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97a5a56e-9f78-4cc6-9299-ebe193cad354" (UID: "97a5a56e-9f78-4cc6-9299-ebe193cad354"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.335563 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data" (OuterVolumeSpecName: "config-data") pod "97a5a56e-9f78-4cc6-9299-ebe193cad354" (UID: "97a5a56e-9f78-4cc6-9299-ebe193cad354"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.336758 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-747d9754b8-8kqq9"] Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337244 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337265 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337279 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b33bddbf-64ec-40c6-a7ea-5919c5a1042d" containerName="keystone-bootstrap" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337285 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b33bddbf-64ec-40c6-a7ea-5919c5a1042d" containerName="keystone-bootstrap" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337297 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337303 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337320 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" containerName="init" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337326 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" containerName="init" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337348 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337354 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337368 4903 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="97a5a56e-9f78-4cc6-9299-ebe193cad354" containerName="placement-db-sync" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337373 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="97a5a56e-9f78-4cc6-9299-ebe193cad354" containerName="placement-db-sync" Nov 26 22:42:23 crc kubenswrapper[4903]: E1126 22:42:23.337399 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337405 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337640 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337656 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-log" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337675 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b33bddbf-64ec-40c6-a7ea-5919c5a1042d" containerName="keystone-bootstrap" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337736 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337755 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba2f783a-8bc8-4d80-8ad1-2b8cc13299f5" containerName="init" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337766 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" containerName="glance-httpd" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.337772 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="97a5a56e-9f78-4cc6-9299-ebe193cad354" containerName="placement-db-sync" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.338903 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.338919 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts" (OuterVolumeSpecName: "scripts") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.338971 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.342053 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.342302 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.349106 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv" (OuterVolumeSpecName: "kube-api-access-d64dv") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "kube-api-access-d64dv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.368762 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-747d9754b8-8kqq9"] Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.368863 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.382186 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.391424 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data" (OuterVolumeSpecName: "config-data") pod "40ee578a-9b83-43a6-a627-550a1f6fa958" (UID: "40ee578a-9b83-43a6-a627-550a1f6fa958"). InnerVolumeSpecName "config-data". 
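The paired cpu_manager.go:410 and memory_manager.go:354 entries at 22:42:23.337 above show the CPU and memory managers dropping per-container state for the just-deleted glance, keystone-bootstrap, and placement-db-sync pods before the replacement pods are admitted. A companion sketch that collects those removals, assuming only the two message shapes quoted above (the pod/container dedup key is an illustrative choice, not kubelet's):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Matches both the cpu_manager "RemoveStaleState: removing container" error
    // lines and the memory_manager "RemoveStaleState removing state" info lines;
    // the state_mem "Deleted CPUSet assignment" lines repeat the same pair and
    // are left out to avoid triple-counting.
    var stale = regexp.MustCompile(`RemoveStaleState[: ].*?podUID="([^"]+)" containerName="([^"]+)"`)

    func main() {
        seen := map[string]bool{}
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
        for sc.Scan() {
            if m := stale.FindStringSubmatch(sc.Text()); m != nil {
                if key := m[1] + "/" + m[2]; !seen[key] {
                    seen[key] = true
                    fmt.Println("stale state removed:", key)
                }
            }
        }
    }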
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429369 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74c08acb-478e-442a-b66d-5f29e75790f4-logs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429439 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-scripts\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429498 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-config-data\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429529 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-internal-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429554 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-combined-ca-bundle\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429571 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-public-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429612 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm7lg\" (UniqueName: \"kubernetes.io/projected/74c08acb-478e-442a-b66d-5f29e75790f4-kube-api-access-cm7lg\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429739 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429755 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429765 4903 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429775 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429785 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d64dv\" (UniqueName: \"kubernetes.io/projected/99dddd38-b1aa-49b9-82c2-4ac85598ef74-kube-api-access-d64dv\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429793 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40ee578a-9b83-43a6-a627-550a1f6fa958-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429802 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429810 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97a5a56e-9f78-4cc6-9299-ebe193cad354-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429820 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.429827 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99dddd38-b1aa-49b9-82c2-4ac85598ef74-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.450670 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.454546 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data" (OuterVolumeSpecName: "config-data") pod "99dddd38-b1aa-49b9-82c2-4ac85598ef74" (UID: "99dddd38-b1aa-49b9-82c2-4ac85598ef74"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.532444 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-combined-ca-bundle\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.532537 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-public-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.532630 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm7lg\" (UniqueName: \"kubernetes.io/projected/74c08acb-478e-442a-b66d-5f29e75790f4-kube-api-access-cm7lg\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.532814 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74c08acb-478e-442a-b66d-5f29e75790f4-logs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.532928 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-scripts\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.533043 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-config-data\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.533101 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-internal-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.533214 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.533246 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99dddd38-b1aa-49b9-82c2-4ac85598ef74-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.533534 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74c08acb-478e-442a-b66d-5f29e75790f4-logs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " 
pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.537563 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-public-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.538159 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-scripts\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.539306 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-internal-tls-certs\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.540190 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-combined-ca-bundle\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.541513 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74c08acb-478e-442a-b66d-5f29e75790f4-config-data\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.550412 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm7lg\" (UniqueName: \"kubernetes.io/projected/74c08acb-478e-442a-b66d-5f29e75790f4-kube-api-access-cm7lg\") pod \"placement-747d9754b8-8kqq9\" (UID: \"74c08acb-478e-442a-b66d-5f29e75790f4\") " pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.614714 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.625313 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.649478 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.651297 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.660459 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.680706 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.682279 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.687314 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.736983 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737062 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737207 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737259 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737428 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737487 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737533 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.737641 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9qxb\" (UniqueName: \"kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.842881 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9qxb\" (UniqueName: \"kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.842961 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843007 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843054 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843074 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843116 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843143 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843168 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 
22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.843323 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.848033 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.848111 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.855671 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.856801 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.859397 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.867866 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.873758 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9qxb\" (UniqueName: \"kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:23 crc kubenswrapper[4903]: I1126 22:42:23.880747 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.014460 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.055550 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40ee578a-9b83-43a6-a627-550a1f6fa958" path="/var/lib/kubelet/pods/40ee578a-9b83-43a6-a627-550a1f6fa958/volumes" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.198648 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-fcdf5f968-7ppxk"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.200009 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219026 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pdkfz" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219042 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219073 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219031 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219286 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.219339 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.251905 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fcdf5f968-7ppxk"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253190 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-fernet-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253245 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-combined-ca-bundle\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253287 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-credential-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253307 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-config-data\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253344 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-public-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253393 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-internal-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253417 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-scripts\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.253458 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkjk5\" (UniqueName: \"kubernetes.io/projected/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-kube-api-access-pkjk5\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.260992 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-747d9754b8-8kqq9"] Nov 26 22:42:24 crc kubenswrapper[4903]: W1126 22:42:24.268930 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74c08acb_478e_442a_b66d_5f29e75790f4.slice/crio-7657a23cbe3368fe6d3c35f81a15124de9b2340435a664f9c1724034b8935277 WatchSource:0}: Error finding container 7657a23cbe3368fe6d3c35f81a15124de9b2340435a664f9c1724034b8935277: Status 404 returned error can't find the container with id 7657a23cbe3368fe6d3c35f81a15124de9b2340435a664f9c1724034b8935277 Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.310535 4903 generic.go:334] "Generic (PLEG): container finished" podID="8a96189f-52eb-44aa-8638-96d516cd0eb3" containerID="d2bdfa7466b3f339611a3f8ba8cbb5f2122fb16dfe6599730212c78f3e1eae51" exitCode=0 Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.310612 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-dz7dx" event={"ID":"8a96189f-52eb-44aa-8638-96d516cd0eb3","Type":"ContainerDied","Data":"d2bdfa7466b3f339611a3f8ba8cbb5f2122fb16dfe6599730212c78f3e1eae51"} Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.318944 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-747d9754b8-8kqq9" event={"ID":"74c08acb-478e-442a-b66d-5f29e75790f4","Type":"ContainerStarted","Data":"7657a23cbe3368fe6d3c35f81a15124de9b2340435a664f9c1724034b8935277"} Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.320230 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359201 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-credential-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359505 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-config-data\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359542 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-public-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359589 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-internal-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359609 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-scripts\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359637 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkjk5\" (UniqueName: \"kubernetes.io/projected/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-kube-api-access-pkjk5\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359732 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-fernet-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.359757 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-combined-ca-bundle\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.365200 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-internal-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.365571 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-scripts\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.369078 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-combined-ca-bundle\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.369484 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-config-data\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.373533 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-public-tls-certs\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.375841 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-credential-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.384104 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.389624 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-fernet-keys\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.396033 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkjk5\" (UniqueName: \"kubernetes.io/projected/b3f65d25-6e7d-4b8e-99e1-c75c39abb982-kube-api-access-pkjk5\") pod \"keystone-fcdf5f968-7ppxk\" (UID: \"b3f65d25-6e7d-4b8e-99e1-c75c39abb982\") " pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.400133 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.408574 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.410743 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.415288 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.415505 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.419907 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472145 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472276 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472372 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472496 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ff6z\" (UniqueName: \"kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472607 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472724 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472811 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.472924 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.544092 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576500 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576565 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576601 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576646 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576704 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576722 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576754 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.576814 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ff6z\" (UniqueName: \"kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 
22:42:24.578280 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.578299 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.578552 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.587079 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.587543 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.588985 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.603448 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ff6z\" (UniqueName: \"kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.603857 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.615512 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.630503 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " pod="openstack/glance-default-external-api-0" Nov 26 22:42:24 crc 
kubenswrapper[4903]: W1126 22:42:24.645892 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod306f5561_d47c_4e76_a4ab_5320d327954d.slice/crio-9f8af7dedbeb59b532171464238d9c4cf2df6bd3dc35a84d87c8a0cff165dec6 WatchSource:0}: Error finding container 9f8af7dedbeb59b532171464238d9c4cf2df6bd3dc35a84d87c8a0cff165dec6: Status 404 returned error can't find the container with id 9f8af7dedbeb59b532171464238d9c4cf2df6bd3dc35a84d87c8a0cff165dec6 Nov 26 22:42:24 crc kubenswrapper[4903]: I1126 22:42:24.728172 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.006400 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.094324 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdx64\" (UniqueName: \"kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64\") pod \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.094432 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle\") pod \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.094745 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data\") pod \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\" (UID: \"f42951d5-40b8-4f39-8a87-5f7e5809bf87\") " Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.097283 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fcdf5f968-7ppxk"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.102033 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "f42951d5-40b8-4f39-8a87-5f7e5809bf87" (UID: "f42951d5-40b8-4f39-8a87-5f7e5809bf87"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.102184 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64" (OuterVolumeSpecName: "kube-api-access-kdx64") pod "f42951d5-40b8-4f39-8a87-5f7e5809bf87" (UID: "f42951d5-40b8-4f39-8a87-5f7e5809bf87"). InnerVolumeSpecName "kube-api-access-kdx64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.133815 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f42951d5-40b8-4f39-8a87-5f7e5809bf87" (UID: "f42951d5-40b8-4f39-8a87-5f7e5809bf87"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.197586 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdx64\" (UniqueName: \"kubernetes.io/projected/f42951d5-40b8-4f39-8a87-5f7e5809bf87-kube-api-access-kdx64\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.197616 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.197626 4903 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/f42951d5-40b8-4f39-8a87-5f7e5809bf87-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.339515 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fcdf5f968-7ppxk" event={"ID":"b3f65d25-6e7d-4b8e-99e1-c75c39abb982","Type":"ContainerStarted","Data":"b8b7be1fc773f99b44ee41622297798e9c4cb5cffe56d3e5767ac6a1bec5140c"} Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.374250 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-747d9754b8-8kqq9" event={"ID":"74c08acb-478e-442a-b66d-5f29e75790f4","Type":"ContainerStarted","Data":"a75291f1d2162e85e1fa5bc86b86651dd08e88119ed995a369fff54777f53135"} Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.374296 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-747d9754b8-8kqq9" event={"ID":"74c08acb-478e-442a-b66d-5f29e75790f4","Type":"ContainerStarted","Data":"b007330d8f740b6cd4c1a1e2d7b6f7502f7532cf6741d1d9cff04bb8f56d3c5e"} Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.375210 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.375234 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.380792 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerStarted","Data":"9f8af7dedbeb59b532171464238d9c4cf2df6bd3dc35a84d87c8a0cff165dec6"} Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.408759 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-w6n8h" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.408766 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-w6n8h" event={"ID":"f42951d5-40b8-4f39-8a87-5f7e5809bf87","Type":"ContainerDied","Data":"a08835c0cb3c4a4c7fba385f1f459f9f7aceddc8a3359675b078fecfb68e03db"} Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.408802 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a08835c0cb3c4a4c7fba385f1f459f9f7aceddc8a3359675b078fecfb68e03db" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.421295 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-747d9754b8-8kqq9" podStartSLOduration=2.421273259 podStartE2EDuration="2.421273259s" podCreationTimestamp="2025-11-26 22:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:25.398988242 +0000 UTC m=+1274.089223152" watchObservedRunningTime="2025-11-26 22:42:25.421273259 +0000 UTC m=+1274.111508169" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.574746 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-79464fcfdb-twqnx"] Nov 26 22:42:25 crc kubenswrapper[4903]: E1126 22:42:25.575260 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" containerName="barbican-db-sync" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.575272 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" containerName="barbican-db-sync" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.575456 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" containerName="barbican-db-sync" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.576583 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.579160 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.588316 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.588724 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nc6m5" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.595931 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-58b8c46b65-kdwlj"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.598461 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.602266 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.606701 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.617035 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-58b8c46b65-kdwlj"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.618294 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl6gz\" (UniqueName: \"kubernetes.io/projected/8824059c-5e2d-4ce5-b224-fc144593d08d-kube-api-access-pl6gz\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.618483 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-combined-ca-bundle\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.618532 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data-custom\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.618668 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8824059c-5e2d-4ce5-b224-fc144593d08d-logs\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.618775 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.654314 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-79464fcfdb-twqnx"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.733592 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734128 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl6gz\" (UniqueName: 
\"kubernetes.io/projected/8824059c-5e2d-4ce5-b224-fc144593d08d-kube-api-access-pl6gz\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734222 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data-custom\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734305 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734348 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-combined-ca-bundle\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734372 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-combined-ca-bundle\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734415 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data-custom\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734460 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg9bp\" (UniqueName: \"kubernetes.io/projected/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-kube-api-access-kg9bp\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734485 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-logs\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.734610 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8824059c-5e2d-4ce5-b224-fc144593d08d-logs\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " 
pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.736936 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.739550 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="dnsmasq-dns" containerID="cri-o://e3933e4fde501795d0d907b563137866190add24b8b61e29f3e2e902b91f03b7" gracePeriod=10 Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.740008 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.743540 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data-custom\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.747742 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8824059c-5e2d-4ce5-b224-fc144593d08d-logs\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.754520 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-combined-ca-bundle\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.758075 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl6gz\" (UniqueName: \"kubernetes.io/projected/8824059c-5e2d-4ce5-b224-fc144593d08d-kube-api-access-pl6gz\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.758588 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8824059c-5e2d-4ce5-b224-fc144593d08d-config-data\") pod \"barbican-keystone-listener-79464fcfdb-twqnx\" (UID: \"8824059c-5e2d-4ce5-b224-fc144593d08d\") " pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.830435 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.833019 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.844393 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.849098 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data-custom\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.849296 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.849407 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-combined-ca-bundle\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.849548 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg9bp\" (UniqueName: \"kubernetes.io/projected/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-kube-api-access-kg9bp\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.849664 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-logs\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.850430 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-logs\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.858371 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-combined-ca-bundle\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.863081 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.872319 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:25 crc 
kubenswrapper[4903]: I1126 22:42:25.873767 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-config-data-custom\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.874883 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.886575 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg9bp\" (UniqueName: \"kubernetes.io/projected/40944cbe-7c1b-43b0-bed6-28f9490a0d5f-kube-api-access-kg9bp\") pod \"barbican-worker-58b8c46b65-kdwlj\" (UID: \"40944cbe-7c1b-43b0-bed6-28f9490a0d5f\") " pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.891058 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.896255 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.927729 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.946139 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-58b8c46b65-kdwlj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952601 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952708 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952755 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsrtk\" (UniqueName: \"kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952789 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952818 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cqzr\" (UniqueName: 
\"kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952850 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952890 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952960 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.952986 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.953029 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:25 crc kubenswrapper[4903]: I1126 22:42:25.953103 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.055955 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99dddd38-b1aa-49b9-82c2-4ac85598ef74" path="/var/lib/kubelet/pods/99dddd38-b1aa-49b9-82c2-4ac85598ef74/volumes" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058173 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058250 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: 
\"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058304 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058346 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsrtk\" (UniqueName: \"kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058374 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058394 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cqzr\" (UniqueName: \"kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058411 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058437 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058479 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058500 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.058528 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " 
pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.060173 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.060479 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.061036 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.061406 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.061629 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.062272 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.067018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.074898 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.078608 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cqzr\" (UniqueName: \"kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr\") pod \"dnsmasq-dns-85ff748b95-l8k8n\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.084019 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.084327 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsrtk\" (UniqueName: \"kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk\") pod \"barbican-api-674b64b8b-7ztpj\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.099182 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.112524 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.134953 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-dz7dx" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.264582 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pl4x\" (UniqueName: \"kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x\") pod \"8a96189f-52eb-44aa-8638-96d516cd0eb3\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.265467 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle\") pod \"8a96189f-52eb-44aa-8638-96d516cd0eb3\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.267991 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data\") pod \"8a96189f-52eb-44aa-8638-96d516cd0eb3\" (UID: \"8a96189f-52eb-44aa-8638-96d516cd0eb3\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.294951 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x" (OuterVolumeSpecName: "kube-api-access-2pl4x") pod "8a96189f-52eb-44aa-8638-96d516cd0eb3" (UID: "8a96189f-52eb-44aa-8638-96d516cd0eb3"). InnerVolumeSpecName "kube-api-access-2pl4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.394665 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pl4x\" (UniqueName: \"kubernetes.io/projected/8a96189f-52eb-44aa-8638-96d516cd0eb3-kube-api-access-2pl4x\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.411076 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a96189f-52eb-44aa-8638-96d516cd0eb3" (UID: "8a96189f-52eb-44aa-8638-96d516cd0eb3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.462353 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data" (OuterVolumeSpecName: "config-data") pod "8a96189f-52eb-44aa-8638-96d516cd0eb3" (UID: "8a96189f-52eb-44aa-8638-96d516cd0eb3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.474662 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerStarted","Data":"2bbdefa8bf7f14be7a999a70047d42e5205cf1cc221381e75199313cf2c2be79"} Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.475557 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.480413 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-dz7dx" event={"ID":"8a96189f-52eb-44aa-8638-96d516cd0eb3","Type":"ContainerDied","Data":"de017ef6c413ae718d1a63c119f36cfb3643fa1ed01e649f6229c43acd47a77a"} Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.480452 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de017ef6c413ae718d1a63c119f36cfb3643fa1ed01e649f6229c43acd47a77a" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.480551 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-dz7dx" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.486616 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fcdf5f968-7ppxk" event={"ID":"b3f65d25-6e7d-4b8e-99e1-c75c39abb982","Type":"ContainerStarted","Data":"ad315797ab0a1b468c4396d582bb312e41c65b85259b547c07a40f4a3e040cf5"} Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.502274 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.502408 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerStarted","Data":"8505d04695a9d0c561bd090e0fc5fde3adca7845b5084012c0e05e5fd474d197"} Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.507559 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.507604 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a96189f-52eb-44aa-8638-96d516cd0eb3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.507839 4903 generic.go:334] "Generic (PLEG): container finished" podID="63158eb2-a413-4b83-9218-cabc20543498" containerID="e3933e4fde501795d0d907b563137866190add24b8b61e29f3e2e902b91f03b7" exitCode=0 Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.508046 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" 
event={"ID":"63158eb2-a413-4b83-9218-cabc20543498","Type":"ContainerDied","Data":"e3933e4fde501795d0d907b563137866190add24b8b61e29f3e2e902b91f03b7"} Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.508104 4903 scope.go:117] "RemoveContainer" containerID="e3933e4fde501795d0d907b563137866190add24b8b61e29f3e2e902b91f03b7" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.508341 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qttx2" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.615444 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.616171 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.616265 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.616350 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.616374 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.616458 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwc8l\" (UniqueName: \"kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l\") pod \"63158eb2-a413-4b83-9218-cabc20543498\" (UID: \"63158eb2-a413-4b83-9218-cabc20543498\") " Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.627227 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-fcdf5f968-7ppxk" podStartSLOduration=2.627182889 podStartE2EDuration="2.627182889s" podCreationTimestamp="2025-11-26 22:42:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:26.516888077 +0000 UTC m=+1275.207122987" watchObservedRunningTime="2025-11-26 22:42:26.627182889 +0000 UTC m=+1275.317417799" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.640574 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l" (OuterVolumeSpecName: "kube-api-access-vwc8l") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: 
"63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "kube-api-access-vwc8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.706348 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: "63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.711331 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: "63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.715386 4903 scope.go:117] "RemoveContainer" containerID="3f73b9d94076a8779d914ab901b72ba0e9e73767117fd6bd482afc5366bb7101" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.730259 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.730288 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.730297 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwc8l\" (UniqueName: \"kubernetes.io/projected/63158eb2-a413-4b83-9218-cabc20543498-kube-api-access-vwc8l\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.743275 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config" (OuterVolumeSpecName: "config") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: "63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.748836 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: "63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.753507 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "63158eb2-a413-4b83-9218-cabc20543498" (UID: "63158eb2-a413-4b83-9218-cabc20543498"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.832446 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.832478 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.832491 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63158eb2-a413-4b83-9218-cabc20543498-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.945049 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:26 crc kubenswrapper[4903]: I1126 22:42:26.956707 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qttx2"] Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.129927 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.158301 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-58b8c46b65-kdwlj"] Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.178878 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-79464fcfdb-twqnx"] Nov 26 22:42:27 crc kubenswrapper[4903]: W1126 22:42:27.185898 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40944cbe_7c1b_43b0_bed6_28f9490a0d5f.slice/crio-4026101c6ac74fb8513b83b958723d45d9c036a6506d5cfb40a439009550cf04 WatchSource:0}: Error finding container 4026101c6ac74fb8513b83b958723d45d9c036a6506d5cfb40a439009550cf04: Status 404 returned error can't find the container with id 4026101c6ac74fb8513b83b958723d45d9c036a6506d5cfb40a439009550cf04 Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.360308 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.554209 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" event={"ID":"8824059c-5e2d-4ce5-b224-fc144593d08d","Type":"ContainerStarted","Data":"9c06670cf7e3b220e3604b61c3aae49fd837bad2b8f0b02165646b1322659699"} Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.586965 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerStarted","Data":"2a0d4da81420be605fab4e1c6e2df4d96cf065623347e9f2136de15a180c6a27"} Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.601256 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerStarted","Data":"bbbe63fadf21dac554d3015cf1ed260a2f8ccf34be6ac5507eda98e5aec56f31"} Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.614440 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-58b8c46b65-kdwlj" 
event={"ID":"40944cbe-7c1b-43b0-bed6-28f9490a0d5f","Type":"ContainerStarted","Data":"4026101c6ac74fb8513b83b958723d45d9c036a6506d5cfb40a439009550cf04"} Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.622010 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerStarted","Data":"f51c317ace15fdc8c4f6bc5041bb57e433115de30aa770ec064ccecbb29c7035"} Nov 26 22:42:27 crc kubenswrapper[4903]: I1126 22:42:27.651645 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.651626392 podStartE2EDuration="4.651626392s" podCreationTimestamp="2025-11-26 22:42:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:27.642229891 +0000 UTC m=+1276.332464801" watchObservedRunningTime="2025-11-26 22:42:27.651626392 +0000 UTC m=+1276.341861302" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.044193 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63158eb2-a413-4b83-9218-cabc20543498" path="/var/lib/kubelet/pods/63158eb2-a413-4b83-9218-cabc20543498/volumes" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.667096 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fxksz" event={"ID":"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c","Type":"ContainerStarted","Data":"b77c66d26b5e5affdaef4e9bc1f1f0fe69ceb3ffbf82926fbafec37d80355562"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.683138 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerStarted","Data":"0a32b28d121888b53d1f6c7cecf250676da76bd12f79d54eb1bc8f0ab2de18cd"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.697852 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerStarted","Data":"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.697896 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerStarted","Data":"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.699105 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.699136 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.699941 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-fxksz" podStartSLOduration=5.199614751 podStartE2EDuration="45.699925224s" podCreationTimestamp="2025-11-26 22:41:43 +0000 UTC" firstStartedPulling="2025-11-26 22:41:45.416449949 +0000 UTC m=+1234.106684859" lastFinishedPulling="2025-11-26 22:42:25.916760422 +0000 UTC m=+1274.606995332" observedRunningTime="2025-11-26 22:42:28.695887586 +0000 UTC m=+1277.386122496" watchObservedRunningTime="2025-11-26 22:42:28.699925224 +0000 UTC m=+1277.390160124" Nov 26 22:42:28 crc 
kubenswrapper[4903]: I1126 22:42:28.712735 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerID="979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95" exitCode=0 Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.712874 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" event={"ID":"f6a087fc-426b-4353-b0dc-552c66dfef8a","Type":"ContainerDied","Data":"979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.712911 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" event={"ID":"f6a087fc-426b-4353-b0dc-552c66dfef8a","Type":"ContainerStarted","Data":"b1fd7ba1e1457d1176528bcfba8e69374f3011da67b99fc1620fa994eadd710b"} Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.732353 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.732335722 podStartE2EDuration="4.732335722s" podCreationTimestamp="2025-11-26 22:42:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:28.726263709 +0000 UTC m=+1277.416498619" watchObservedRunningTime="2025-11-26 22:42:28.732335722 +0000 UTC m=+1277.422570632" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.765398 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-86bcb477db-8xtr8"] Nov 26 22:42:28 crc kubenswrapper[4903]: E1126 22:42:28.767061 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="init" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.767087 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="init" Nov 26 22:42:28 crc kubenswrapper[4903]: E1126 22:42:28.767113 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="dnsmasq-dns" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.767122 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="dnsmasq-dns" Nov 26 22:42:28 crc kubenswrapper[4903]: E1126 22:42:28.767148 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a96189f-52eb-44aa-8638-96d516cd0eb3" containerName="heat-db-sync" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.767156 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a96189f-52eb-44aa-8638-96d516cd0eb3" containerName="heat-db-sync" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.767428 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a96189f-52eb-44aa-8638-96d516cd0eb3" containerName="heat-db-sync" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.767447 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="63158eb2-a413-4b83-9218-cabc20543498" containerName="dnsmasq-dns" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.768668 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.770200 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.770442 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.783525 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86bcb477db-8xtr8"] Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.794613 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-674b64b8b-7ztpj" podStartSLOduration=3.7945953279999998 podStartE2EDuration="3.794595328s" podCreationTimestamp="2025-11-26 22:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:28.751131604 +0000 UTC m=+1277.441366504" watchObservedRunningTime="2025-11-26 22:42:28.794595328 +0000 UTC m=+1277.484830228" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889186 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37c314f3-5577-423f-887f-7c551f339c3b-logs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889239 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdslr\" (UniqueName: \"kubernetes.io/projected/37c314f3-5577-423f-887f-7c551f339c3b-kube-api-access-rdslr\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889296 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-combined-ca-bundle\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889338 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-public-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889358 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889424 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data-custom\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: 
\"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.889455 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-internal-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994418 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data-custom\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994716 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-internal-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37c314f3-5577-423f-887f-7c551f339c3b-logs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdslr\" (UniqueName: \"kubernetes.io/projected/37c314f3-5577-423f-887f-7c551f339c3b-kube-api-access-rdslr\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994862 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-combined-ca-bundle\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994902 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-public-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:28 crc kubenswrapper[4903]: I1126 22:42:28.994921 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.000432 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-internal-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: 
\"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.000960 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37c314f3-5577-423f-887f-7c551f339c3b-logs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.003427 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data-custom\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.006232 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-combined-ca-bundle\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.006799 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-config-data\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.007792 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37c314f3-5577-423f-887f-7c551f339c3b-public-tls-certs\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.012108 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdslr\" (UniqueName: \"kubernetes.io/projected/37c314f3-5577-423f-887f-7c551f339c3b-kube-api-access-rdslr\") pod \"barbican-api-86bcb477db-8xtr8\" (UID: \"37c314f3-5577-423f-887f-7c551f339c3b\") " pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.175890 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.744407 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" event={"ID":"f6a087fc-426b-4353-b0dc-552c66dfef8a","Type":"ContainerStarted","Data":"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a"} Nov 26 22:42:29 crc kubenswrapper[4903]: I1126 22:42:29.767207 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" podStartSLOduration=4.7671927830000005 podStartE2EDuration="4.767192783s" podCreationTimestamp="2025-11-26 22:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:29.765041855 +0000 UTC m=+1278.455276765" watchObservedRunningTime="2025-11-26 22:42:29.767192783 +0000 UTC m=+1278.457427693" Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.088147 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-86bcb477db-8xtr8"] Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.760203 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-58b8c46b65-kdwlj" event={"ID":"40944cbe-7c1b-43b0-bed6-28f9490a0d5f","Type":"ContainerStarted","Data":"b6468da36cee98f198e1bca7431d62948be37f29c97355e33de498facfac4220"} Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.760634 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-58b8c46b65-kdwlj" event={"ID":"40944cbe-7c1b-43b0-bed6-28f9490a0d5f","Type":"ContainerStarted","Data":"a2de991846952b8e2dd10ee2e6751aeb871a29f3877fdf92f717faf97efe8c8c"} Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.765119 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" event={"ID":"8824059c-5e2d-4ce5-b224-fc144593d08d","Type":"ContainerStarted","Data":"6bce51a236d2e46e399eb6a46202ecdb2a33f8b40fa267000a44d34c93e64823"} Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.765187 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" event={"ID":"8824059c-5e2d-4ce5-b224-fc144593d08d","Type":"ContainerStarted","Data":"74f368dcfac67bc6b117cc0db4e7322d20cfb94bbc106cbb978ca49d02869fac"} Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.765893 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.777535 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-58b8c46b65-kdwlj" podStartSLOduration=3.346972036 podStartE2EDuration="5.777519329s" podCreationTimestamp="2025-11-26 22:42:25 +0000 UTC" firstStartedPulling="2025-11-26 22:42:27.227244912 +0000 UTC m=+1275.917479822" lastFinishedPulling="2025-11-26 22:42:29.657792205 +0000 UTC m=+1278.348027115" observedRunningTime="2025-11-26 22:42:30.775066613 +0000 UTC m=+1279.465301533" watchObservedRunningTime="2025-11-26 22:42:30.777519329 +0000 UTC m=+1279.467754249" Nov 26 22:42:30 crc kubenswrapper[4903]: I1126 22:42:30.815468 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-79464fcfdb-twqnx" podStartSLOduration=3.352288278 podStartE2EDuration="5.815447774s" podCreationTimestamp="2025-11-26 22:42:25 +0000 UTC" 
firstStartedPulling="2025-11-26 22:42:27.195267746 +0000 UTC m=+1275.885502656" lastFinishedPulling="2025-11-26 22:42:29.658427242 +0000 UTC m=+1278.348662152" observedRunningTime="2025-11-26 22:42:30.806023492 +0000 UTC m=+1279.496258422" watchObservedRunningTime="2025-11-26 22:42:30.815447774 +0000 UTC m=+1279.505682694" Nov 26 22:42:31 crc kubenswrapper[4903]: I1126 22:42:31.981058 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:42:31 crc kubenswrapper[4903]: I1126 22:42:31.981388 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:42:31 crc kubenswrapper[4903]: I1126 22:42:31.981434 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:42:31 crc kubenswrapper[4903]: I1126 22:42:31.982375 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:42:31 crc kubenswrapper[4903]: I1126 22:42:31.982463 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302" gracePeriod=600 Nov 26 22:42:32 crc kubenswrapper[4903]: I1126 22:42:32.792459 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302" exitCode=0 Nov 26 22:42:32 crc kubenswrapper[4903]: I1126 22:42:32.792772 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302"} Nov 26 22:42:32 crc kubenswrapper[4903]: I1126 22:42:32.792804 4903 scope.go:117] "RemoveContainer" containerID="9f68f340d26b09594de1e8e15e4a05a42e976379d490a108a32a7c6572cae165" Nov 26 22:42:32 crc kubenswrapper[4903]: I1126 22:42:32.796464 4903 generic.go:334] "Generic (PLEG): container finished" podID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" containerID="b77c66d26b5e5affdaef4e9bc1f1f0fe69ceb3ffbf82926fbafec37d80355562" exitCode=0 Nov 26 22:42:32 crc kubenswrapper[4903]: I1126 22:42:32.796507 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fxksz" event={"ID":"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c","Type":"ContainerDied","Data":"b77c66d26b5e5affdaef4e9bc1f1f0fe69ceb3ffbf82926fbafec37d80355562"} Nov 26 22:42:33 crc kubenswrapper[4903]: W1126 22:42:33.749075 4903 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37c314f3_5577_423f_887f_7c551f339c3b.slice/crio-a34b7707449f44f9c9e717e061ebaf71a53e75205aa14b14db323aaffc2ee45c WatchSource:0}: Error finding container a34b7707449f44f9c9e717e061ebaf71a53e75205aa14b14db323aaffc2ee45c: Status 404 returned error can't find the container with id a34b7707449f44f9c9e717e061ebaf71a53e75205aa14b14db323aaffc2ee45c Nov 26 22:42:33 crc kubenswrapper[4903]: I1126 22:42:33.810778 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86bcb477db-8xtr8" event={"ID":"37c314f3-5577-423f-887f-7c551f339c3b","Type":"ContainerStarted","Data":"a34b7707449f44f9c9e717e061ebaf71a53e75205aa14b14db323aaffc2ee45c"} Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.017919 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.017989 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.067355 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.081158 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.729994 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.730080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.772948 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.825910 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.833134 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.833172 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.833185 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 22:42:34 crc kubenswrapper[4903]: I1126 22:42:34.833196 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.822173 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-fxksz" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.888506 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.888592 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.888727 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.888812 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.888895 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.889013 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26f6j\" (UniqueName: \"kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j\") pod \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\" (UID: \"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c\") " Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.889469 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.894308 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j" (OuterVolumeSpecName: "kube-api-access-26f6j") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "kube-api-access-26f6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.894811 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts" (OuterVolumeSpecName: "scripts") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.899685 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.901026 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-fxksz" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.901237 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fxksz" event={"ID":"d0b2d5fd-9425-4082-b1cc-3ce796c82e0c","Type":"ContainerDied","Data":"ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b"} Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.901265 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad2000c5b6ecd8a5eadcf57407ee4dfdb2337180b6f24eab030b76d5ad7ff32b" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.991500 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.991805 4903 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.991816 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26f6j\" (UniqueName: \"kubernetes.io/projected/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-kube-api-access-26f6j\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:35 crc kubenswrapper[4903]: I1126 22:42:35.991826 4903 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.013605 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.070522 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data" (OuterVolumeSpecName: "config-data") pod "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" (UID: "d0b2d5fd-9425-4082-b1cc-3ce796c82e0c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.093297 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.093318 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.102835 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:36 crc kubenswrapper[4903]: E1126 22:42:36.118269 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.198916 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.199322 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="dnsmasq-dns" containerID="cri-o://9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c" gracePeriod=10 Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.817575 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.929669 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.932434 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.932606 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7phz\" (UniqueName: \"kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.932730 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.932847 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: 
\"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.932960 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.933130 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb\") pod \"01971aea-a19a-4c71-a893-b6ee8277160d\" (UID: \"01971aea-a19a-4c71-a893-b6ee8277160d\") " Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.937837 4903 generic.go:334] "Generic (PLEG): container finished" podID="01971aea-a19a-4c71-a893-b6ee8277160d" containerID="9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c" exitCode=0 Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.937994 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" event={"ID":"01971aea-a19a-4c71-a893-b6ee8277160d","Type":"ContainerDied","Data":"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.938080 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" event={"ID":"01971aea-a19a-4c71-a893-b6ee8277160d","Type":"ContainerDied","Data":"eb43f1c16c353287d45d2e83ce3ae153756967cc68b530972ae006cf7b1705b3"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.938163 4903 scope.go:117] "RemoveContainer" containerID="9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.938383 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-htt6q" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.941844 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86bcb477db-8xtr8" event={"ID":"37c314f3-5577-423f-887f-7c551f339c3b","Type":"ContainerStarted","Data":"a9776c521522610b44c4c03e1a7c9b08974627e33be86bf76068cf7ca2843579"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.943152 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.943229 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.943305 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-86bcb477db-8xtr8" event={"ID":"37c314f3-5577-423f-887f-7c551f339c3b","Type":"ContainerStarted","Data":"75ac335460c5df5d704e91dc2ab996764afc795c292bb26acce575082c038b14"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949593 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949616 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949753 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="ceilometer-notification-agent" containerID="cri-o://80fae27da19b974cb9e0b08887be36e3c9e401e504cd21caf4c2b5afbf2df925" gracePeriod=30 Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949873 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerStarted","Data":"8798c19cea002eaec469e42f015418a53f1410a66e2cfa418d74c87761310f3d"} Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949970 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.949987 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.950440 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="proxy-httpd" containerID="cri-o://8798c19cea002eaec469e42f015418a53f1410a66e2cfa418d74c87761310f3d" gracePeriod=30 Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.950586 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="sg-core" containerID="cri-o://c6fe33af25f2479439e622058688ae8cb76e78f88ed71095ccafa36430df3db2" gracePeriod=30 Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.951095 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.958787 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz" (OuterVolumeSpecName: "kube-api-access-t7phz") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "kube-api-access-t7phz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:36 crc kubenswrapper[4903]: I1126 22:42:36.988599 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-86bcb477db-8xtr8" podStartSLOduration=8.988576341 podStartE2EDuration="8.988576341s" podCreationTimestamp="2025-11-26 22:42:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:36.976617831 +0000 UTC m=+1285.666852741" watchObservedRunningTime="2025-11-26 22:42:36.988576341 +0000 UTC m=+1285.678811251" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.020899 4903 scope.go:117] "RemoveContainer" containerID="5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.035578 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.039325 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7phz\" (UniqueName: \"kubernetes.io/projected/01971aea-a19a-4c71-a893-b6ee8277160d-kube-api-access-t7phz\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.039348 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.103982 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:37 crc kubenswrapper[4903]: E1126 22:42:37.104564 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="init" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.104649 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="init" Nov 26 22:42:37 crc kubenswrapper[4903]: E1126 22:42:37.104722 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" containerName="cinder-db-sync" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.104786 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" containerName="cinder-db-sync" Nov 26 22:42:37 crc kubenswrapper[4903]: E1126 22:42:37.104848 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="dnsmasq-dns" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.104896 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="dnsmasq-dns" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.105162 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" containerName="dnsmasq-dns" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.105246 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" containerName="cinder-db-sync" Nov 26 22:42:37 crc kubenswrapper[4903]: 
I1126 22:42:37.106150 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.109500 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.114111 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.114303 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.114546 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q7t2v" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.116398 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.119390 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.125975 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.143618 4903 scope.go:117] "RemoveContainer" containerID="9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.144238 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.147413 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.147440 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.147449 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: E1126 22:42:37.160874 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c\": container with ID starting with 9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c not found: ID does not exist" containerID="9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.160921 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c"} err="failed to get container status \"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c\": rpc error: code = NotFound desc = could not find container \"9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c\": container with ID starting with 9f0b48aed9d3be0d5f6315ec0a04331b82ac108fdc78630d6977d1c25035206c not found: ID does not exist" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.160947 4903 scope.go:117] "RemoveContainer" containerID="5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.182801 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.189219 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.191051 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config" (OuterVolumeSpecName: "config") pod "01971aea-a19a-4c71-a893-b6ee8277160d" (UID: "01971aea-a19a-4c71-a893-b6ee8277160d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:37 crc kubenswrapper[4903]: E1126 22:42:37.191117 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d\": container with ID starting with 5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d not found: ID does not exist" containerID="5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.191172 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d"} err="failed to get container status \"5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d\": rpc error: code = NotFound desc = could not find container \"5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d\": container with ID starting with 5c8ffd427c8deabb98f42817bc106ea201f2a22167d555e80493dad5988c1d2d not found: ID does not exist" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.198369 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.249961 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250007 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k8tj\" (UniqueName: \"kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250052 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250086 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250114 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250138 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id\") 
pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250171 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250191 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250231 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250257 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250277 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5zkk\" (UniqueName: \"kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250293 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.250375 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01971aea-a19a-4c71-a893-b6ee8277160d-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.297716 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.309902 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-htt6q"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.329401 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.331320 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.338409 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.341549 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355367 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355414 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k8tj\" (UniqueName: \"kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355588 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355639 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355679 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355735 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355787 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355810 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355877 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355906 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355930 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5zkk\" (UniqueName: \"kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.355948 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.356549 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.357072 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.357447 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.357638 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.358230 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.358785 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: 
\"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.362466 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.362645 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.364985 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.369817 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.377509 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5zkk\" (UniqueName: \"kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk\") pod \"cinder-scheduler-0\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.378253 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k8tj\" (UniqueName: \"kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj\") pod \"dnsmasq-dns-5c9776ccc5-2z8db\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.455064 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.457798 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.457842 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrcj7\" (UniqueName: \"kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.457880 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.457909 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.457960 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.458034 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.458062 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.512612 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562323 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrcj7\" (UniqueName: \"kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562625 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562659 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562746 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562847 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.563013 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.563052 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.562909 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.573725 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.599983 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " 
pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.600963 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrcj7\" (UniqueName: \"kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.603481 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.605936 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.613186 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data\") pod \"cinder-api-0\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.664865 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.996599 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerID="8798c19cea002eaec469e42f015418a53f1410a66e2cfa418d74c87761310f3d" exitCode=0 Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.996915 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerID="c6fe33af25f2479439e622058688ae8cb76e78f88ed71095ccafa36430df3db2" exitCode=2 Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.998199 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerDied","Data":"8798c19cea002eaec469e42f015418a53f1410a66e2cfa418d74c87761310f3d"} Nov 26 22:42:37 crc kubenswrapper[4903]: I1126 22:42:37.998229 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerDied","Data":"c6fe33af25f2479439e622058688ae8cb76e78f88ed71095ccafa36430df3db2"} Nov 26 22:42:38 crc kubenswrapper[4903]: I1126 22:42:38.079344 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01971aea-a19a-4c71-a893-b6ee8277160d" path="/var/lib/kubelet/pods/01971aea-a19a-4c71-a893-b6ee8277160d/volumes" Nov 26 22:42:38 crc kubenswrapper[4903]: I1126 22:42:38.131648 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:38 crc kubenswrapper[4903]: I1126 22:42:38.403940 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:38 crc kubenswrapper[4903]: W1126 22:42:38.404933 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c4f9de8_7b0d_4531_86bc_e476df671b79.slice/crio-2369591f0d4aae87419fb920590af9f08352df47c6625a50be7f1c84057bd52b 
Nov 26 22:42:38 crc kubenswrapper[4903]: I1126 22:42:38.443809 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"]
Nov 26 22:42:38 crc kubenswrapper[4903]: W1126 22:42:38.451455 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4997378_3318_4e89_8210_5bfefe2d9467.slice/crio-628890ae32a188788ab49bef6d25e27a5d8f559fdadbf18eaa70ead3f43c3f8f WatchSource:0}: Error finding container 628890ae32a188788ab49bef6d25e27a5d8f559fdadbf18eaa70ead3f43c3f8f: Status 404 returned error can't find the container with id 628890ae32a188788ab49bef6d25e27a5d8f559fdadbf18eaa70ead3f43c3f8f
Nov 26 22:42:38 crc kubenswrapper[4903]: I1126 22:42:38.565288 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-674b64b8b-7ztpj"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.035783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerStarted","Data":"be99447960da141d7f7e3073e39bfcbe94862bc80e4f48e48877f9f3bf689f3d"}
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.040472 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerID="80fae27da19b974cb9e0b08887be36e3c9e401e504cd21caf4c2b5afbf2df925" exitCode=0
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.040545 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerDied","Data":"80fae27da19b974cb9e0b08887be36e3c9e401e504cd21caf4c2b5afbf2df925"}
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.041844 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerStarted","Data":"2369591f0d4aae87419fb920590af9f08352df47c6625a50be7f1c84057bd52b"}
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.043746 4903 generic.go:334] "Generic (PLEG): container finished" podID="a4997378-3318-4e89-8210-5bfefe2d9467" containerID="a44e6fc0addd81184eca5edcd4afc3e86d08d7c469ecfef8ed674f2626094baa" exitCode=0
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.043766 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" event={"ID":"a4997378-3318-4e89-8210-5bfefe2d9467","Type":"ContainerDied","Data":"a44e6fc0addd81184eca5edcd4afc3e86d08d7c469ecfef8ed674f2626094baa"}
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.043780 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" event={"ID":"a4997378-3318-4e89-8210-5bfefe2d9467","Type":"ContainerStarted","Data":"628890ae32a188788ab49bef6d25e27a5d8f559fdadbf18eaa70ead3f43c3f8f"}
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.055291 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.055330 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.089020 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-674b64b8b-7ztpj"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.122560 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.122646 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 22:42:39 crc kubenswrapper[4903]: I1126 22:42:39.137755 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 22:42:39 crc kubenswrapper[4903]: E1126 22:42:39.679044 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4997378_3318_4e89_8210_5bfefe2d9467.slice/crio-conmon-a44e6fc0addd81184eca5edcd4afc3e86d08d7c469ecfef8ed674f2626094baa.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.480825 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.607094 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.607138 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.607614 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.608173 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.608493 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.608781 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.608948 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84g89\" (UniqueName: \"kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.609001 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.609134 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts\") pod \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\" (UID: \"e3b2305d-da0f-4efb-9ac8-3df8527f9dec\") "
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.609705 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.609722 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.616855 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89" (OuterVolumeSpecName: "kube-api-access-84g89") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "kube-api-access-84g89". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.619112 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts" (OuterVolumeSpecName: "scripts") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.672177 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.699507 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.700341 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.711393 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.711424 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84g89\" (UniqueName: \"kubernetes.io/projected/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-kube-api-access-84g89\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.711436 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.711446 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.738410 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data" (OuterVolumeSpecName: "config-data") pod "e3b2305d-da0f-4efb-9ac8-3df8527f9dec" (UID: "e3b2305d-da0f-4efb-9ac8-3df8527f9dec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:40 crc kubenswrapper[4903]: I1126 22:42:40.813892 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3b2305d-da0f-4efb-9ac8-3df8527f9dec-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.088729 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3b2305d-da0f-4efb-9ac8-3df8527f9dec","Type":"ContainerDied","Data":"a567cbf60e1fd4b8a4c3ebd3638f1ffd0ec5f5f3556bbb6c496720e9592ee96c"} Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.088755 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.089073 4903 scope.go:117] "RemoveContainer" containerID="8798c19cea002eaec469e42f015418a53f1410a66e2cfa418d74c87761310f3d" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.093082 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerStarted","Data":"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506"} Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.095110 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" event={"ID":"a4997378-3318-4e89-8210-5bfefe2d9467","Type":"ContainerStarted","Data":"6f42106ab7746f6bff7560cd467fbcd734f1a3712d4510b0ba83270d697f91b3"} Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.096735 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.100438 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerStarted","Data":"9757fdc6186f38754d3ae40d93c75c3c4cfa54eeef390b6bdf2e68fa563fd09e"} Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.123564 4903 scope.go:117] "RemoveContainer" containerID="c6fe33af25f2479439e622058688ae8cb76e78f88ed71095ccafa36430df3db2" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.126371 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" podStartSLOduration=4.126349033 podStartE2EDuration="4.126349033s" podCreationTimestamp="2025-11-26 22:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:41.1143018 +0000 UTC m=+1289.804536710" watchObservedRunningTime="2025-11-26 22:42:41.126349033 +0000 UTC m=+1289.816583943" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.187420 4903 scope.go:117] "RemoveContainer" containerID="80fae27da19b974cb9e0b08887be36e3c9e401e504cd21caf4c2b5afbf2df925" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.244603 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.276785 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294137 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:42:41 crc kubenswrapper[4903]: E1126 22:42:41.294584 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="sg-core" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294597 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="sg-core" Nov 26 22:42:41 crc kubenswrapper[4903]: E1126 22:42:41.294626 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="proxy-httpd" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294632 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="proxy-httpd" Nov 26 22:42:41 crc kubenswrapper[4903]: E1126 22:42:41.294657 4903 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="ceilometer-notification-agent" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294663 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="ceilometer-notification-agent" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294863 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="ceilometer-notification-agent" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294874 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="sg-core" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.294890 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" containerName="proxy-httpd" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.304788 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.307997 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.308896 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.312615 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.429915 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430221 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430261 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430280 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x9j4\" (UniqueName: \"kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430313 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0" Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430363 4903 
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.430378 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531634 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-log-httpd\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531675 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531782 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531832 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531865 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531883 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x9j4\" (UniqueName: \"kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.531918 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.532381 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.532583 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-log-httpd\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.536464 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.537824 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.540201 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.552444 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x9j4\" (UniqueName: \"kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.559209 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data\") pod \"ceilometer-0\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " pod="openstack/ceilometer-0"
Nov 26 22:42:41 crc kubenswrapper[4903]: I1126 22:42:41.620502 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.045169 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3b2305d-da0f-4efb-9ac8-3df8527f9dec" path="/var/lib/kubelet/pods/e3b2305d-da0f-4efb-9ac8-3df8527f9dec/volumes"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.111796 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerStarted","Data":"12412c8199d980c80b79cd4963192d726eaadf4dbd064bf19c473fa062ca7b7b"}
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.116039 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerStarted","Data":"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be"}
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.116511 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.116573 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api" containerID="cri-o://05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" gracePeriod=30
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.116519 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api-log" containerID="cri-o://569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" gracePeriod=30
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.136771 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.140927 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.948665177 podStartE2EDuration="5.14087934s" podCreationTimestamp="2025-11-26 22:42:37 +0000 UTC" firstStartedPulling="2025-11-26 22:42:38.148816018 +0000 UTC m=+1286.839050928" lastFinishedPulling="2025-11-26 22:42:40.341030181 +0000 UTC m=+1289.031265091" observedRunningTime="2025-11-26 22:42:42.128829528 +0000 UTC m=+1290.819064438" watchObservedRunningTime="2025-11-26 22:42:42.14087934 +0000 UTC m=+1290.831114250"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.160048 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.160033333 podStartE2EDuration="5.160033333s" podCreationTimestamp="2025-11-26 22:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:42.147780285 +0000 UTC m=+1290.838015195" watchObservedRunningTime="2025-11-26 22:42:42.160033333 +0000 UTC m=+1290.850268243"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.455562 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.801068 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.965717 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972181 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972256 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972311 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrcj7\" (UniqueName: \"kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972385 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972461 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972518 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972539 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts\") pod \"8c4f9de8-7b0d-4531-86bc-e476df671b79\" (UID: \"8c4f9de8-7b0d-4531-86bc-e476df671b79\") " Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.972850 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.973280 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs" (OuterVolumeSpecName: "logs") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.973303 4903 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8c4f9de8-7b0d-4531-86bc-e476df671b79-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.977057 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7" (OuterVolumeSpecName: "kube-api-access-wrcj7") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "kube-api-access-wrcj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.979778 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:42 crc kubenswrapper[4903]: I1126 22:42:42.993060 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts" (OuterVolumeSpecName: "scripts") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.009259 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.055140 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data" (OuterVolumeSpecName: "config-data") pod "8c4f9de8-7b0d-4531-86bc-e476df671b79" (UID: "8c4f9de8-7b0d-4531-86bc-e476df671b79"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075449 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075670 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c4f9de8-7b0d-4531-86bc-e476df671b79-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075682 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrcj7\" (UniqueName: \"kubernetes.io/projected/8c4f9de8-7b0d-4531-86bc-e476df671b79-kube-api-access-wrcj7\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075713 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075723 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.075732 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4f9de8-7b0d-4531-86bc-e476df671b79-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.130212 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerStarted","Data":"860e8d94bfb9963660da2caaf575ea05f7444de561aa082ad9dd084bd0272218"} Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.130255 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerStarted","Data":"5f7e9d96fa81e7cec883018d4f5c4f4378b58bb18f7d2020392a4633f4eb9e73"} Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132777 4903 generic.go:334] "Generic (PLEG): container finished" podID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerID="05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" exitCode=0 Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132806 4903 generic.go:334] "Generic (PLEG): container finished" podID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerID="569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" exitCode=143 Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132881 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132900 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerDied","Data":"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be"} Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132953 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerDied","Data":"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506"} Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132968 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8c4f9de8-7b0d-4531-86bc-e476df671b79","Type":"ContainerDied","Data":"2369591f0d4aae87419fb920590af9f08352df47c6625a50be7f1c84057bd52b"} Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.132987 4903 scope.go:117] "RemoveContainer" containerID="05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.172041 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.187938 4903 scope.go:117] "RemoveContainer" containerID="569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.204742 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.229744 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:43 crc kubenswrapper[4903]: E1126 22:42:43.230187 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.230208 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api" Nov 26 22:42:43 crc kubenswrapper[4903]: E1126 22:42:43.230249 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api-log" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.230257 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api-log" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.230466 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.230487 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" containerName="cinder-api-log" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.231568 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.235278 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.235601 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.235738 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.247713 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.254918 4903 scope.go:117] "RemoveContainer" containerID="05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" Nov 26 22:42:43 crc kubenswrapper[4903]: E1126 22:42:43.257833 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be\": container with ID starting with 05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be not found: ID does not exist" containerID="05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.257875 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be"} err="failed to get container status \"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be\": rpc error: code = NotFound desc = could not find container \"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be\": container with ID starting with 05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be not found: ID does not exist" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.257897 4903 scope.go:117] "RemoveContainer" containerID="569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" Nov 26 22:42:43 crc kubenswrapper[4903]: E1126 22:42:43.258186 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506\": container with ID starting with 569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506 not found: ID does not exist" containerID="569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.258247 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506"} err="failed to get container status \"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506\": rpc error: code = NotFound desc = could not find container \"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506\": container with ID starting with 569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506 not found: ID does not exist" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.258302 4903 scope.go:117] "RemoveContainer" containerID="05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.258679 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be"} err="failed to get container status \"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be\": rpc error: code = NotFound desc = could not find container \"05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be\": container with ID starting with 05d37a6dddee8adb2c15338ba5c6cd4b0f3027ba48c4d263d7380b2e945f97be not found: ID does not exist" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.258736 4903 scope.go:117] "RemoveContainer" containerID="569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.259169 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506"} err="failed to get container status \"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506\": rpc error: code = NotFound desc = could not find container \"569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506\": container with ID starting with 569f47c5e6b57d863619a9c700b597d8b664e67217dc8127fa94d0737d7b8506 not found: ID does not exist" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.394363 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data-custom\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.394400 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/172525bd-6c7f-4e76-b7b4-47c937c33a14-etc-machine-id\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.394430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.394483 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.394759 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/172525bd-6c7f-4e76-b7b4-47c937c33a14-logs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.395029 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-public-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.395291 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.395480 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-scripts\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.396208 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzfvv\" (UniqueName: \"kubernetes.io/projected/172525bd-6c7f-4e76-b7b4-47c937c33a14-kube-api-access-bzfvv\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529256 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-public-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529615 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529705 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-scripts\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529785 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzfvv\" (UniqueName: \"kubernetes.io/projected/172525bd-6c7f-4e76-b7b4-47c937c33a14-kube-api-access-bzfvv\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529825 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data-custom\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529845 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/172525bd-6c7f-4e76-b7b4-47c937c33a14-etc-machine-id\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529870 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 
22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529898 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.529945 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/172525bd-6c7f-4e76-b7b4-47c937c33a14-logs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.530527 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/172525bd-6c7f-4e76-b7b4-47c937c33a14-logs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.534655 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-public-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.534867 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data-custom\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.535053 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/172525bd-6c7f-4e76-b7b4-47c937c33a14-etc-machine-id\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.535685 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-config-data\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.536231 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.539192 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-scripts\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.540172 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/172525bd-6c7f-4e76-b7b4-47c937c33a14-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.547864 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bzfvv\" (UniqueName: \"kubernetes.io/projected/172525bd-6c7f-4e76-b7b4-47c937c33a14-kube-api-access-bzfvv\") pod \"cinder-api-0\" (UID: \"172525bd-6c7f-4e76-b7b4-47c937c33a14\") " pod="openstack/cinder-api-0" Nov 26 22:42:43 crc kubenswrapper[4903]: I1126 22:42:43.552234 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 22:42:44 crc kubenswrapper[4903]: I1126 22:42:44.060144 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c4f9de8-7b0d-4531-86bc-e476df671b79" path="/var/lib/kubelet/pods/8c4f9de8-7b0d-4531-86bc-e476df671b79/volumes" Nov 26 22:42:44 crc kubenswrapper[4903]: I1126 22:42:44.061165 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 22:42:44 crc kubenswrapper[4903]: I1126 22:42:44.146836 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerStarted","Data":"180065f48eebcb22ee86fd9aefe971cd4dc6d64ab8679491f522210e3458e6be"} Nov 26 22:42:44 crc kubenswrapper[4903]: I1126 22:42:44.148892 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"172525bd-6c7f-4e76-b7b4-47c937c33a14","Type":"ContainerStarted","Data":"eb7138a3b44bdb5c57dd5d81055cb9cbca1eb32f9faf3c7e14d9a28db4af46cf"} Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.069505 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5bbd968879-hmnnt" Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.199262 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.202952 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67dffd5468-nzpt8" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-api" containerID="cri-o://ee374de47b2bbe4c5585c033c9bc204aeb8bfb52fc46b943a7e85f4ceb655027" gracePeriod=30 Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.204310 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67dffd5468-nzpt8" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-httpd" containerID="cri-o://1e234fbb81e8fdfd2f422f70e8ea8a8560118a0a403f5e8261ca3ebcf00e82c5" gracePeriod=30 Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.241514 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerStarted","Data":"fff991dc19fb76a695f02d9175f28c9eee9b48598401b45ed8731f57bf9d7bdb"} Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.244565 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"172525bd-6c7f-4e76-b7b4-47c937c33a14","Type":"ContainerStarted","Data":"409b0dfd3a1e2ae05503d63674b486c94bf98ad90fc1c5fe42eee70a4eedf753"} Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.552367 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:45 crc kubenswrapper[4903]: I1126 22:42:45.917295 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-86bcb477db-8xtr8" Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.067942 4903 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.068213 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-674b64b8b-7ztpj" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api-log" containerID="cri-o://55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c" gracePeriod=30 Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.068763 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-674b64b8b-7ztpj" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api" containerID="cri-o://09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745" gracePeriod=30 Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.271480 4903 generic.go:334] "Generic (PLEG): container finished" podID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerID="1e234fbb81e8fdfd2f422f70e8ea8a8560118a0a403f5e8261ca3ebcf00e82c5" exitCode=0 Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.271762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerDied","Data":"1e234fbb81e8fdfd2f422f70e8ea8a8560118a0a403f5e8261ca3ebcf00e82c5"} Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.299960 4903 generic.go:334] "Generic (PLEG): container finished" podID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerID="55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c" exitCode=143 Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.300043 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerDied","Data":"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c"} Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.310184 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerStarted","Data":"e264e63eabdb973cf35d856c39ff29b7250aaaecbd7cb02474dd78e0b2ad41a7"} Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.311425 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.314820 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"172525bd-6c7f-4e76-b7b4-47c937c33a14","Type":"ContainerStarted","Data":"03b72c65fe6987147ddac3d7ccb6f62f1dba3d17dea10b2a117be971dc48b7c8"} Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.315123 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.352997 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.574060126 podStartE2EDuration="5.352977633s" podCreationTimestamp="2025-11-26 22:42:41 +0000 UTC" firstStartedPulling="2025-11-26 22:42:42.150818936 +0000 UTC m=+1290.841053846" lastFinishedPulling="2025-11-26 22:42:45.929736443 +0000 UTC m=+1294.619971353" observedRunningTime="2025-11-26 22:42:46.331616671 +0000 UTC m=+1295.021851581" watchObservedRunningTime="2025-11-26 22:42:46.352977633 +0000 UTC m=+1295.043212533" Nov 26 22:42:46 crc kubenswrapper[4903]: I1126 22:42:46.367489 4903 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.367473001 podStartE2EDuration="3.367473001s" podCreationTimestamp="2025-11-26 22:42:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:46.349822408 +0000 UTC m=+1295.040057338" watchObservedRunningTime="2025-11-26 22:42:46.367473001 +0000 UTC m=+1295.057707911" Nov 26 22:42:47 crc kubenswrapper[4903]: I1126 22:42:47.513914 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:42:47 crc kubenswrapper[4903]: I1126 22:42:47.596475 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:47 crc kubenswrapper[4903]: I1126 22:42:47.596715 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="dnsmasq-dns" containerID="cri-o://32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a" gracePeriod=10 Nov 26 22:42:47 crc kubenswrapper[4903]: I1126 22:42:47.802366 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 22:42:47 crc kubenswrapper[4903]: I1126 22:42:47.841966 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.165852 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.234447 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.234508 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.234598 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.234618 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cqzr\" (UniqueName: \"kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.235335 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.236148 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config\") pod \"f6a087fc-426b-4353-b0dc-552c66dfef8a\" (UID: \"f6a087fc-426b-4353-b0dc-552c66dfef8a\") " Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.271106 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr" (OuterVolumeSpecName: "kube-api-access-6cqzr") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "kube-api-access-6cqzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.304935 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config" (OuterVolumeSpecName: "config") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.311631 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.323265 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.339408 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.339440 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cqzr\" (UniqueName: \"kubernetes.io/projected/f6a087fc-426b-4353-b0dc-552c66dfef8a-kube-api-access-6cqzr\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.339450 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.339461 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340077 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerID="32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a" exitCode=0 Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340120 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340155 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" event={"ID":"f6a087fc-426b-4353-b0dc-552c66dfef8a","Type":"ContainerDied","Data":"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a"} Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340190 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-l8k8n" event={"ID":"f6a087fc-426b-4353-b0dc-552c66dfef8a","Type":"ContainerDied","Data":"b1fd7ba1e1457d1176528bcfba8e69374f3011da67b99fc1620fa994eadd710b"} Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340211 4903 scope.go:117] "RemoveContainer" containerID="32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340664 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="cinder-scheduler" containerID="cri-o://9757fdc6186f38754d3ae40d93c75c3c4cfa54eeef390b6bdf2e68fa563fd09e" gracePeriod=30 Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340815 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="probe" containerID="cri-o://12412c8199d980c80b79cd4963192d726eaadf4dbd064bf19c473fa062ca7b7b" gracePeriod=30 Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.340944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.354012 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f6a087fc-426b-4353-b0dc-552c66dfef8a" (UID: "f6a087fc-426b-4353-b0dc-552c66dfef8a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.370846 4903 scope.go:117] "RemoveContainer" containerID="979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.394204 4903 scope.go:117] "RemoveContainer" containerID="32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a" Nov 26 22:42:48 crc kubenswrapper[4903]: E1126 22:42:48.394623 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a\": container with ID starting with 32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a not found: ID does not exist" containerID="32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.394674 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a"} err="failed to get container status \"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a\": rpc error: code = NotFound desc = could not find container \"32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a\": container with ID starting with 32947ba5fa7a59a9e70901886ea54f300b231470d791724a8f43322735c4048a not found: ID does not exist" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.394789 4903 scope.go:117] "RemoveContainer" containerID="979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95" Nov 26 22:42:48 crc kubenswrapper[4903]: E1126 22:42:48.395014 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95\": container with ID starting with 979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95 not found: ID does not exist" containerID="979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.395044 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95"} err="failed to get container status \"979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95\": rpc error: code = NotFound desc = could not find container \"979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95\": container with ID starting with 979026f7e18615291d4b5f9a0322c758dee8ead43dd68fbd87520f88de3d7f95 not found: ID does not exist" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.471933 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.472135 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f6a087fc-426b-4353-b0dc-552c66dfef8a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.700209 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:48 crc kubenswrapper[4903]: I1126 22:42:48.711799 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-85ff748b95-l8k8n"] Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.355750 4903 generic.go:334] "Generic (PLEG): container finished" podID="7fa1244c-f57b-4679-8845-ad503d45384f" containerID="12412c8199d980c80b79cd4963192d726eaadf4dbd064bf19c473fa062ca7b7b" exitCode=0 Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.355990 4903 generic.go:334] "Generic (PLEG): container finished" podID="7fa1244c-f57b-4679-8845-ad503d45384f" containerID="9757fdc6186f38754d3ae40d93c75c3c4cfa54eeef390b6bdf2e68fa563fd09e" exitCode=0 Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.357432 4903 generic.go:334] "Generic (PLEG): container finished" podID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerID="ee374de47b2bbe4c5585c033c9bc204aeb8bfb52fc46b943a7e85f4ceb655027" exitCode=0 Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.357756 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerDied","Data":"12412c8199d980c80b79cd4963192d726eaadf4dbd064bf19c473fa062ca7b7b"} Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.357800 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerDied","Data":"9757fdc6186f38754d3ae40d93c75c3c4cfa54eeef390b6bdf2e68fa563fd09e"} Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.357813 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerDied","Data":"ee374de47b2bbe4c5585c033c9bc204aeb8bfb52fc46b943a7e85f4ceb655027"} Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.538492 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-674b64b8b-7ztpj" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:55528->10.217.0.196:9311: read: connection reset by peer" Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.538579 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-674b64b8b-7ztpj" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:55542->10.217.0.196:9311: read: connection reset by peer" Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.882118 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:49 crc kubenswrapper[4903]: I1126 22:42:49.895480 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.021440 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022307 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle\") pod \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022446 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022495 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022555 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8pzp\" (UniqueName: \"kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp\") pod \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022590 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022673 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022713 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config\") pod \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.022986 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config\") pod \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.023056 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5zkk\" (UniqueName: \"kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.023090 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs\") pod \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\" (UID: \"892b1cdc-def0-4620-b3b6-d9cc248b33bb\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.023135 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom\") pod \"7fa1244c-f57b-4679-8845-ad503d45384f\" (UID: \"7fa1244c-f57b-4679-8845-ad503d45384f\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.024241 4903 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7fa1244c-f57b-4679-8845-ad503d45384f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.028894 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "892b1cdc-def0-4620-b3b6-d9cc248b33bb" (UID: "892b1cdc-def0-4620-b3b6-d9cc248b33bb"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.031811 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp" (OuterVolumeSpecName: "kube-api-access-b8pzp") pod "892b1cdc-def0-4620-b3b6-d9cc248b33bb" (UID: "892b1cdc-def0-4620-b3b6-d9cc248b33bb"). InnerVolumeSpecName "kube-api-access-b8pzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.044571 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk" (OuterVolumeSpecName: "kube-api-access-h5zkk") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "kube-api-access-h5zkk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.048736 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" path="/var/lib/kubelet/pods/f6a087fc-426b-4353-b0dc-552c66dfef8a/volumes" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.050115 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.057990 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts" (OuterVolumeSpecName: "scripts") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.105954 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.110341 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "892b1cdc-def0-4620-b3b6-d9cc248b33bb" (UID: "892b1cdc-def0-4620-b3b6-d9cc248b33bb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.112882 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config" (OuterVolumeSpecName: "config") pod "892b1cdc-def0-4620-b3b6-d9cc248b33bb" (UID: "892b1cdc-def0-4620-b3b6-d9cc248b33bb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125783 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8pzp\" (UniqueName: \"kubernetes.io/projected/892b1cdc-def0-4620-b3b6-d9cc248b33bb-kube-api-access-b8pzp\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125806 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125816 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125825 4903 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125834 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5zkk\" (UniqueName: \"kubernetes.io/projected/7fa1244c-f57b-4679-8845-ad503d45384f-kube-api-access-h5zkk\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125842 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125850 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.125859 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.133432 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.156339 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "892b1cdc-def0-4620-b3b6-d9cc248b33bb" (UID: "892b1cdc-def0-4620-b3b6-d9cc248b33bb"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.174137 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data" (OuterVolumeSpecName: "config-data") pod "7fa1244c-f57b-4679-8845-ad503d45384f" (UID: "7fa1244c-f57b-4679-8845-ad503d45384f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.226867 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data\") pod \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227141 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom\") pod \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227206 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsrtk\" (UniqueName: \"kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk\") pod \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle\") pod \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227298 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs\") pod \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\" (UID: \"c5e25b6a-d8fd-404a-b7f9-7ebad060a071\") " Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227825 4903 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892b1cdc-def0-4620-b3b6-d9cc248b33bb-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.227845 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fa1244c-f57b-4679-8845-ad503d45384f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.228211 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs" (OuterVolumeSpecName: "logs") pod "c5e25b6a-d8fd-404a-b7f9-7ebad060a071" (UID: "c5e25b6a-d8fd-404a-b7f9-7ebad060a071"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.231580 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c5e25b6a-d8fd-404a-b7f9-7ebad060a071" (UID: "c5e25b6a-d8fd-404a-b7f9-7ebad060a071"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.232558 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk" (OuterVolumeSpecName: "kube-api-access-rsrtk") pod "c5e25b6a-d8fd-404a-b7f9-7ebad060a071" (UID: "c5e25b6a-d8fd-404a-b7f9-7ebad060a071"). InnerVolumeSpecName "kube-api-access-rsrtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.256535 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5e25b6a-d8fd-404a-b7f9-7ebad060a071" (UID: "c5e25b6a-d8fd-404a-b7f9-7ebad060a071"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.278485 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data" (OuterVolumeSpecName: "config-data") pod "c5e25b6a-d8fd-404a-b7f9-7ebad060a071" (UID: "c5e25b6a-d8fd-404a-b7f9-7ebad060a071"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.330953 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.330992 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.331041 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsrtk\" (UniqueName: \"kubernetes.io/projected/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-kube-api-access-rsrtk\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.331057 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.331069 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e25b6a-d8fd-404a-b7f9-7ebad060a071-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.371930 4903 generic.go:334] "Generic (PLEG): container finished" podID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerID="09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745" exitCode=0 Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.371983 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-674b64b8b-7ztpj" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.372011 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerDied","Data":"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745"} Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.372162 4903 scope.go:117] "RemoveContainer" containerID="09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.372171 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674b64b8b-7ztpj" event={"ID":"c5e25b6a-d8fd-404a-b7f9-7ebad060a071","Type":"ContainerDied","Data":"bbbe63fadf21dac554d3015cf1ed260a2f8ccf34be6ac5507eda98e5aec56f31"} Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.374870 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"7fa1244c-f57b-4679-8845-ad503d45384f","Type":"ContainerDied","Data":"be99447960da141d7f7e3073e39bfcbe94862bc80e4f48e48877f9f3bf689f3d"} Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.375149 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.377189 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dffd5468-nzpt8" event={"ID":"892b1cdc-def0-4620-b3b6-d9cc248b33bb","Type":"ContainerDied","Data":"6d8d79495e272f8ebb018a4f058c8c28e7c27632d42d81d8c5a1a8058066cc64"} Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.377248 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67dffd5468-nzpt8" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.407638 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.448785 4903 scope.go:117] "RemoveContainer" containerID="55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.453772 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-674b64b8b-7ztpj"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.465357 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.474095 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.483918 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484436 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api-log" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484455 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api-log" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484486 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-httpd" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484495 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-httpd" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484512 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-api" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484520 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-api" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484534 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="init" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484542 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="init" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484562 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="dnsmasq-dns" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484569 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="dnsmasq-dns" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484585 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="cinder-scheduler" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484592 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="cinder-scheduler" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484605 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" 
containerName="probe" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484612 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="probe" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.484634 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484645 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484892 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="cinder-scheduler" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484921 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-httpd" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484935 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" containerName="probe" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484950 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6a087fc-426b-4353-b0dc-552c66dfef8a" containerName="dnsmasq-dns" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.484968 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" containerName="neutron-api" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.486648 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.486742 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" containerName="barbican-api-log" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.493296 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.495825 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.496648 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.510464 4903 scope.go:117] "RemoveContainer" containerID="09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.512168 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745\": container with ID starting with 09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745 not found: ID does not exist" containerID="09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.512229 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745"} err="failed to get container status \"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745\": rpc error: code = NotFound desc = could not find container \"09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745\": container with ID starting with 09494e81cab83fe57030cdf3ed18d68afb788438da6418f10e3dc11c04c4d745 not found: ID does not exist" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.512260 4903 scope.go:117] "RemoveContainer" containerID="55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c" Nov 26 22:42:50 crc kubenswrapper[4903]: E1126 22:42:50.512680 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c\": container with ID starting with 55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c not found: ID does not exist" containerID="55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.512744 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c"} err="failed to get container status \"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c\": rpc error: code = NotFound desc = could not find container \"55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c\": container with ID starting with 55f1150f61627d147e75510fea1af37e612819119a133099b4fb25ce81d5989c not found: ID does not exist" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.512773 4903 scope.go:117] "RemoveContainer" containerID="12412c8199d980c80b79cd4963192d726eaadf4dbd064bf19c473fa062ca7b7b" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.539355 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-67dffd5468-nzpt8"] Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.540144 4903 scope.go:117] "RemoveContainer" containerID="9757fdc6186f38754d3ae40d93c75c3c4cfa54eeef390b6bdf2e68fa563fd09e" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.549644 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:50 
crc kubenswrapper[4903]: I1126 22:42:50.562667 4903 scope.go:117] "RemoveContainer" containerID="1e234fbb81e8fdfd2f422f70e8ea8a8560118a0a403f5e8261ca3ebcf00e82c5" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.593097 4903 scope.go:117] "RemoveContainer" containerID="ee374de47b2bbe4c5585c033c9bc204aeb8bfb52fc46b943a7e85f4ceb655027" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.652424 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.652580 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5hjs\" (UniqueName: \"kubernetes.io/projected/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-kube-api-access-s5hjs\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.652720 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.652783 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.653054 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-scripts\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.653326 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755532 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755600 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5hjs\" (UniqueName: \"kubernetes.io/projected/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-kube-api-access-s5hjs\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755636 4903 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755664 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755739 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-scripts\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755800 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.755879 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.769164 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.769290 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-scripts\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.769441 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.770214 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-config-data\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 22:42:50.774851 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5hjs\" (UniqueName: \"kubernetes.io/projected/40127bc0-c09b-4c3f-af93-cdfcaee9d36e-kube-api-access-s5hjs\") pod \"cinder-scheduler-0\" (UID: \"40127bc0-c09b-4c3f-af93-cdfcaee9d36e\") " pod="openstack/cinder-scheduler-0" Nov 26 22:42:50 crc kubenswrapper[4903]: I1126 
22:42:50.818440 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 22:42:51 crc kubenswrapper[4903]: I1126 22:42:51.346014 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 22:42:51 crc kubenswrapper[4903]: W1126 22:42:51.355255 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40127bc0_c09b_4c3f_af93_cdfcaee9d36e.slice/crio-870edb96039f9d65fba5dcc273766f6ffc5ee140f9a07cee50b9c7aff1643faa WatchSource:0}: Error finding container 870edb96039f9d65fba5dcc273766f6ffc5ee140f9a07cee50b9c7aff1643faa: Status 404 returned error can't find the container with id 870edb96039f9d65fba5dcc273766f6ffc5ee140f9a07cee50b9c7aff1643faa Nov 26 22:42:51 crc kubenswrapper[4903]: I1126 22:42:51.401019 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40127bc0-c09b-4c3f-af93-cdfcaee9d36e","Type":"ContainerStarted","Data":"870edb96039f9d65fba5dcc273766f6ffc5ee140f9a07cee50b9c7aff1643faa"} Nov 26 22:42:52 crc kubenswrapper[4903]: I1126 22:42:52.051224 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fa1244c-f57b-4679-8845-ad503d45384f" path="/var/lib/kubelet/pods/7fa1244c-f57b-4679-8845-ad503d45384f/volumes" Nov 26 22:42:52 crc kubenswrapper[4903]: I1126 22:42:52.052521 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="892b1cdc-def0-4620-b3b6-d9cc248b33bb" path="/var/lib/kubelet/pods/892b1cdc-def0-4620-b3b6-d9cc248b33bb/volumes" Nov 26 22:42:52 crc kubenswrapper[4903]: I1126 22:42:52.053425 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5e25b6a-d8fd-404a-b7f9-7ebad060a071" path="/var/lib/kubelet/pods/c5e25b6a-d8fd-404a-b7f9-7ebad060a071/volumes" Nov 26 22:42:52 crc kubenswrapper[4903]: I1126 22:42:52.416227 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40127bc0-c09b-4c3f-af93-cdfcaee9d36e","Type":"ContainerStarted","Data":"158eb51391ec973705e1b26e73d45cb486a176477c559f61e2332cddb9e9d660"} Nov 26 22:42:53 crc kubenswrapper[4903]: I1126 22:42:53.456722 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40127bc0-c09b-4c3f-af93-cdfcaee9d36e","Type":"ContainerStarted","Data":"0430b4e7a5499c1bcfef3194ede12932e95d49e8393b95df161e43a64dd91367"} Nov 26 22:42:53 crc kubenswrapper[4903]: I1126 22:42:53.496870 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.496851783 podStartE2EDuration="3.496851783s" podCreationTimestamp="2025-11-26 22:42:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:42:53.484502852 +0000 UTC m=+1302.174737762" watchObservedRunningTime="2025-11-26 22:42:53.496851783 +0000 UTC m=+1302.187086693" Nov 26 22:42:54 crc kubenswrapper[4903]: I1126 22:42:54.949088 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:54 crc kubenswrapper[4903]: I1126 22:42:54.949423 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-747d9754b8-8kqq9" Nov 26 22:42:55 crc kubenswrapper[4903]: I1126 22:42:55.347932 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/cinder-api-0" Nov 26 22:42:55 crc kubenswrapper[4903]: I1126 22:42:55.818815 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.206317 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-fcdf5f968-7ppxk" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.353587 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.355461 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.359568 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.359618 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.359946 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hvxfr" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.377481 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.513890 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzfgp\" (UniqueName: \"kubernetes.io/projected/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-kube-api-access-mzfgp\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.513939 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.514219 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.514427 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config-secret\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.617009 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzfgp\" (UniqueName: \"kubernetes.io/projected/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-kube-api-access-mzfgp\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.617111 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: 
\"kubernetes.io/configmap/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.618073 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.618316 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.619408 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config-secret\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.625398 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.629856 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-openstack-config-secret\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.637235 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzfgp\" (UniqueName: \"kubernetes.io/projected/0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0-kube-api-access-mzfgp\") pod \"openstackclient\" (UID: \"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0\") " pod="openstack/openstackclient" Nov 26 22:42:56 crc kubenswrapper[4903]: I1126 22:42:56.678812 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 22:42:57 crc kubenswrapper[4903]: I1126 22:42:57.186364 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 22:42:57 crc kubenswrapper[4903]: W1126 22:42:57.188076 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0057fb69_c2f2_4f5f_ad83_5fd16fcc99b0.slice/crio-b029e33e43bf5992f25f7810fa7270ccd40ee23298ae25a35197107fc50d8655 WatchSource:0}: Error finding container b029e33e43bf5992f25f7810fa7270ccd40ee23298ae25a35197107fc50d8655: Status 404 returned error can't find the container with id b029e33e43bf5992f25f7810fa7270ccd40ee23298ae25a35197107fc50d8655 Nov 26 22:42:57 crc kubenswrapper[4903]: I1126 22:42:57.507983 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0","Type":"ContainerStarted","Data":"b029e33e43bf5992f25f7810fa7270ccd40ee23298ae25a35197107fc50d8655"} Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.301169 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-xw96b"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.303857 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.317799 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xw96b"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.408565 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tfjs\" (UniqueName: \"kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.409028 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.412748 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-6b7jl"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.414191 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.438192 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6b7jl"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.446760 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-3d57-account-create-update-kbz2q"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.448120 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.450016 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.459810 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3d57-account-create-update-kbz2q"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.510637 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.510742 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd8p9\" (UniqueName: \"kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.510771 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.510906 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tfjs\" (UniqueName: \"kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.511858 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.534472 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tfjs\" (UniqueName: \"kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs\") pod \"nova-api-db-create-xw96b\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.627880 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.628336 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd8p9\" (UniqueName: \"kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " 
pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.628384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7psz\" (UniqueName: \"kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.628462 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.632138 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-78l4c"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.633177 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.635491 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.637384 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.653535 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd8p9\" (UniqueName: \"kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9\") pod \"nova-cell0-db-create-6b7jl\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.662577 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f7c7-account-create-update-s7xfc"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.665459 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.689777 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.702512 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-78l4c"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.713774 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f7c7-account-create-update-s7xfc"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.733827 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.734216 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.734907 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7psz\" (UniqueName: \"kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.734986 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmwsp\" (UniqueName: \"kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.735154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.737324 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.761669 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7psz\" (UniqueName: \"kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz\") pod \"nova-api-3d57-account-create-update-kbz2q\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.769231 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.808024 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-a856-account-create-update-fr9ds"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.809352 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.814817 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.824231 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a856-account-create-update-fr9ds"] Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.847088 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.847196 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9wwp\" (UniqueName: \"kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.847788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.847990 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmwsp\" (UniqueName: \"kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.848924 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.863600 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmwsp\" (UniqueName: \"kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp\") pod \"nova-cell1-db-create-78l4c\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.950644 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.950983 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thqb6\" (UniqueName: 
\"kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.951039 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.951077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9wwp\" (UniqueName: \"kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.952937 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:00 crc kubenswrapper[4903]: I1126 22:43:00.972018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9wwp\" (UniqueName: \"kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp\") pod \"nova-cell0-f7c7-account-create-update-s7xfc\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.053675 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.053749 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thqb6\" (UniqueName: \"kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.054439 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.067015 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.079378 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thqb6\" (UniqueName: \"kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6\") pod \"nova-cell1-a856-account-create-update-fr9ds\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.080083 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.162081 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.179116 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.275357 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xw96b"] Nov 26 22:43:01 crc kubenswrapper[4903]: W1126 22:43:01.284241 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb845761e_5639_47f8_b33e_982f99d9e575.slice/crio-50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1 WatchSource:0}: Error finding container 50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1: Status 404 returned error can't find the container with id 50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1 Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.533678 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3d57-account-create-update-kbz2q"] Nov 26 22:43:01 crc kubenswrapper[4903]: W1126 22:43:01.545075 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7301bb2b_a968_426c_8c7b_147e84af9d2e.slice/crio-dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087 WatchSource:0}: Error finding container dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087: Status 404 returned error can't find the container with id dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087 Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.556379 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6b7jl"] Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.575563 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xw96b" event={"ID":"b845761e-5639-47f8-b33e-982f99d9e575","Type":"ContainerStarted","Data":"a708073e0f2b077031c503a6ed2c86aecd807b154f4c65af17e815adf1740eab"} Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.575619 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xw96b" event={"ID":"b845761e-5639-47f8-b33e-982f99d9e575","Type":"ContainerStarted","Data":"50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1"} Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.582904 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3d57-account-create-update-kbz2q" 
event={"ID":"7301bb2b-a968-426c-8c7b-147e84af9d2e","Type":"ContainerStarted","Data":"dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087"} Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.598402 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-xw96b" podStartSLOduration=1.598383558 podStartE2EDuration="1.598383558s" podCreationTimestamp="2025-11-26 22:43:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:01.593882767 +0000 UTC m=+1310.284117677" watchObservedRunningTime="2025-11-26 22:43:01.598383558 +0000 UTC m=+1310.288618468" Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.775200 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-78l4c"] Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.799092 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f7c7-account-create-update-s7xfc"] Nov 26 22:43:01 crc kubenswrapper[4903]: I1126 22:43:01.977937 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a856-account-create-update-fr9ds"] Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.597345 4903 generic.go:334] "Generic (PLEG): container finished" podID="7301bb2b-a968-426c-8c7b-147e84af9d2e" containerID="7ec60eda21ae74821dcc7d16f84f8b93378b7402ff66bf70b333fe31f0adc7de" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.597654 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3d57-account-create-update-kbz2q" event={"ID":"7301bb2b-a968-426c-8c7b-147e84af9d2e","Type":"ContainerDied","Data":"7ec60eda21ae74821dcc7d16f84f8b93378b7402ff66bf70b333fe31f0adc7de"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.604200 4903 generic.go:334] "Generic (PLEG): container finished" podID="88faddae-4ab0-4bb0-886c-ce747933a8d2" containerID="1b3cecdf8ffa6caf25cc440b94e68b03a729d9f7aa461cfe5da517150763d437" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.604259 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" event={"ID":"88faddae-4ab0-4bb0-886c-ce747933a8d2","Type":"ContainerDied","Data":"1b3cecdf8ffa6caf25cc440b94e68b03a729d9f7aa461cfe5da517150763d437"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.604283 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" event={"ID":"88faddae-4ab0-4bb0-886c-ce747933a8d2","Type":"ContainerStarted","Data":"7957df87a775810e9e471629f599b86151a94beccde4efa85c214f4c171ac8fd"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.605664 4903 generic.go:334] "Generic (PLEG): container finished" podID="acc450ee-60c4-4203-9e53-bfc0d0996227" containerID="bd02a539b33242ed344060fd4ba88d9478bfc258dd2d63e898c32f6127340156" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.605735 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" event={"ID":"acc450ee-60c4-4203-9e53-bfc0d0996227","Type":"ContainerDied","Data":"bd02a539b33242ed344060fd4ba88d9478bfc258dd2d63e898c32f6127340156"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.605751 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" 
event={"ID":"acc450ee-60c4-4203-9e53-bfc0d0996227","Type":"ContainerStarted","Data":"12b3258beb6684ac346877df52fb07d517ae743591108590d0ffe8b6ef37525e"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.607086 4903 generic.go:334] "Generic (PLEG): container finished" podID="b845761e-5639-47f8-b33e-982f99d9e575" containerID="a708073e0f2b077031c503a6ed2c86aecd807b154f4c65af17e815adf1740eab" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.607127 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xw96b" event={"ID":"b845761e-5639-47f8-b33e-982f99d9e575","Type":"ContainerDied","Data":"a708073e0f2b077031c503a6ed2c86aecd807b154f4c65af17e815adf1740eab"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.608878 4903 generic.go:334] "Generic (PLEG): container finished" podID="723772d5-ee6f-493e-94f1-7b9804ec1957" containerID="41ec46f787f4db088b4d9fd45388d9477a16bc1a1ca740c83d6913c3baae4559" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.608917 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6b7jl" event={"ID":"723772d5-ee6f-493e-94f1-7b9804ec1957","Type":"ContainerDied","Data":"41ec46f787f4db088b4d9fd45388d9477a16bc1a1ca740c83d6913c3baae4559"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.608932 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6b7jl" event={"ID":"723772d5-ee6f-493e-94f1-7b9804ec1957","Type":"ContainerStarted","Data":"e4632ea1c9e0834494de31fd524a0b26b4b806e1339e7b6dce0185a60720b50d"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.614102 4903 generic.go:334] "Generic (PLEG): container finished" podID="a9861636-fbd1-48eb-b179-efaed34ef23a" containerID="3e487066b0cc9784f92e20198698aa818e63a1baf402525ef7d07dd11df1512c" exitCode=0 Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.614161 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-78l4c" event={"ID":"a9861636-fbd1-48eb-b179-efaed34ef23a","Type":"ContainerDied","Data":"3e487066b0cc9784f92e20198698aa818e63a1baf402525ef7d07dd11df1512c"} Nov 26 22:43:02 crc kubenswrapper[4903]: I1126 22:43:02.614188 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-78l4c" event={"ID":"a9861636-fbd1-48eb-b179-efaed34ef23a","Type":"ContainerStarted","Data":"a577f6114711ba7fbe1b3c7a3d330b651a10663286e7065edfc7dba5dc316888"} Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.857357 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-76df48858c-p4q7x"] Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.859537 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-76df48858c-p4q7x"] Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.859642 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.878742 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.879003 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.879127 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.983778 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-public-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.983893 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-run-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.983949 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-log-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.983978 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-config-data\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.984004 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b88p\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-kube-api-access-7b88p\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.984086 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-internal-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.984172 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-etc-swift\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:04 crc kubenswrapper[4903]: I1126 22:43:04.984217 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-combined-ca-bundle\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087276 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-combined-ca-bundle\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087390 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-public-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087429 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-run-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087484 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-log-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087539 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-config-data\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087567 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b88p\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-kube-api-access-7b88p\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087652 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-internal-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.087766 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-etc-swift\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.088881 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-run-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.092405 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73028630-97ff-425e-9ac8-1b30f1c834c4-log-httpd\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.095436 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-internal-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.095506 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-public-tls-certs\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.095573 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-combined-ca-bundle\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.095998 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-etc-swift\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.102805 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73028630-97ff-425e-9ac8-1b30f1c834c4-config-data\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.120584 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b88p\" (UniqueName: \"kubernetes.io/projected/73028630-97ff-425e-9ac8-1b30f1c834c4-kube-api-access-7b88p\") pod \"swift-proxy-76df48858c-p4q7x\" (UID: \"73028630-97ff-425e-9ac8-1b30f1c834c4\") " pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:05 crc kubenswrapper[4903]: I1126 22:43:05.213531 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.106396 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.107413 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-central-agent" containerID="cri-o://860e8d94bfb9963660da2caaf575ea05f7444de561aa082ad9dd084bd0272218" gracePeriod=30 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.107548 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="sg-core" containerID="cri-o://fff991dc19fb76a695f02d9175f28c9eee9b48598401b45ed8731f57bf9d7bdb" gracePeriod=30 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.107588 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-notification-agent" containerID="cri-o://180065f48eebcb22ee86fd9aefe971cd4dc6d64ab8679491f522210e3458e6be" gracePeriod=30 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.107540 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" containerID="cri-o://e264e63eabdb973cf35d856c39ff29b7250aaaecbd7cb02474dd78e0b2ad41a7" gracePeriod=30 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.136768 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672187 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerID="e264e63eabdb973cf35d856c39ff29b7250aaaecbd7cb02474dd78e0b2ad41a7" exitCode=0 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672217 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerID="fff991dc19fb76a695f02d9175f28c9eee9b48598401b45ed8731f57bf9d7bdb" exitCode=2 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672226 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerID="860e8d94bfb9963660da2caaf575ea05f7444de561aa082ad9dd084bd0272218" exitCode=0 Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672260 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerDied","Data":"e264e63eabdb973cf35d856c39ff29b7250aaaecbd7cb02474dd78e0b2ad41a7"} Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672307 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerDied","Data":"fff991dc19fb76a695f02d9175f28c9eee9b48598401b45ed8731f57bf9d7bdb"} Nov 26 22:43:06 crc kubenswrapper[4903]: I1126 22:43:06.672318 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerDied","Data":"860e8d94bfb9963660da2caaf575ea05f7444de561aa082ad9dd084bd0272218"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.778202 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" event={"ID":"acc450ee-60c4-4203-9e53-bfc0d0996227","Type":"ContainerDied","Data":"12b3258beb6684ac346877df52fb07d517ae743591108590d0ffe8b6ef37525e"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.778914 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12b3258beb6684ac346877df52fb07d517ae743591108590d0ffe8b6ef37525e" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.782274 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xw96b" event={"ID":"b845761e-5639-47f8-b33e-982f99d9e575","Type":"ContainerDied","Data":"50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.782313 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50dc94abe8fed16d15b798691b5bd43ae3ed51a90f8438e899437559f8761af1" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.798312 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6b7jl" event={"ID":"723772d5-ee6f-493e-94f1-7b9804ec1957","Type":"ContainerDied","Data":"e4632ea1c9e0834494de31fd524a0b26b4b806e1339e7b6dce0185a60720b50d"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.799166 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4632ea1c9e0834494de31fd524a0b26b4b806e1339e7b6dce0185a60720b50d" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.803888 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.804196 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-78l4c" event={"ID":"a9861636-fbd1-48eb-b179-efaed34ef23a","Type":"ContainerDied","Data":"a577f6114711ba7fbe1b3c7a3d330b651a10663286e7065edfc7dba5dc316888"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.804235 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a577f6114711ba7fbe1b3c7a3d330b651a10663286e7065edfc7dba5dc316888" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.808137 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3d57-account-create-update-kbz2q" event={"ID":"7301bb2b-a968-426c-8c7b-147e84af9d2e","Type":"ContainerDied","Data":"dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.808163 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc191e821c887f59a7ca4b31abc9fc7928f6995ebce740ba7dfdc89c02b21087" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.810067 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" event={"ID":"88faddae-4ab0-4bb0-886c-ce747933a8d2","Type":"ContainerDied","Data":"7957df87a775810e9e471629f599b86151a94beccde4efa85c214f4c171ac8fd"} Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.810095 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7957df87a775810e9e471629f599b86151a94beccde4efa85c214f4c171ac8fd" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.818090 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.820370 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.826165 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892197 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts\") pod \"723772d5-ee6f-493e-94f1-7b9804ec1957\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892404 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd8p9\" (UniqueName: \"kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9\") pod \"723772d5-ee6f-493e-94f1-7b9804ec1957\" (UID: \"723772d5-ee6f-493e-94f1-7b9804ec1957\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892443 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmwsp\" (UniqueName: \"kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp\") pod \"a9861636-fbd1-48eb-b179-efaed34ef23a\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892498 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thqb6\" (UniqueName: \"kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6\") pod \"88faddae-4ab0-4bb0-886c-ce747933a8d2\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892532 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts\") pod \"88faddae-4ab0-4bb0-886c-ce747933a8d2\" (UID: \"88faddae-4ab0-4bb0-886c-ce747933a8d2\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892580 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tfjs\" (UniqueName: \"kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs\") pod \"b845761e-5639-47f8-b33e-982f99d9e575\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892612 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts\") pod \"a9861636-fbd1-48eb-b179-efaed34ef23a\" (UID: \"a9861636-fbd1-48eb-b179-efaed34ef23a\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.892639 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts\") pod \"b845761e-5639-47f8-b33e-982f99d9e575\" (UID: \"b845761e-5639-47f8-b33e-982f99d9e575\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.893591 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "723772d5-ee6f-493e-94f1-7b9804ec1957" (UID: "723772d5-ee6f-493e-94f1-7b9804ec1957"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.893662 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b845761e-5639-47f8-b33e-982f99d9e575" (UID: "b845761e-5639-47f8-b33e-982f99d9e575"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.894891 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88faddae-4ab0-4bb0-886c-ce747933a8d2" (UID: "88faddae-4ab0-4bb0-886c-ce747933a8d2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.895566 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.896580 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9861636-fbd1-48eb-b179-efaed34ef23a" (UID: "a9861636-fbd1-48eb-b179-efaed34ef23a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.899400 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs" (OuterVolumeSpecName: "kube-api-access-5tfjs") pod "b845761e-5639-47f8-b33e-982f99d9e575" (UID: "b845761e-5639-47f8-b33e-982f99d9e575"). InnerVolumeSpecName "kube-api-access-5tfjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.899866 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9" (OuterVolumeSpecName: "kube-api-access-gd8p9") pod "723772d5-ee6f-493e-94f1-7b9804ec1957" (UID: "723772d5-ee6f-493e-94f1-7b9804ec1957"). InnerVolumeSpecName "kube-api-access-gd8p9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.901564 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp" (OuterVolumeSpecName: "kube-api-access-pmwsp") pod "a9861636-fbd1-48eb-b179-efaed34ef23a" (UID: "a9861636-fbd1-48eb-b179-efaed34ef23a"). InnerVolumeSpecName "kube-api-access-pmwsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.918653 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6" (OuterVolumeSpecName: "kube-api-access-thqb6") pod "88faddae-4ab0-4bb0-886c-ce747933a8d2" (UID: "88faddae-4ab0-4bb0-886c-ce747933a8d2"). InnerVolumeSpecName "kube-api-access-thqb6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.934577 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.994487 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts\") pod \"7301bb2b-a968-426c-8c7b-147e84af9d2e\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.994919 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7psz\" (UniqueName: \"kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz\") pod \"7301bb2b-a968-426c-8c7b-147e84af9d2e\" (UID: \"7301bb2b-a968-426c-8c7b-147e84af9d2e\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995044 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts\") pod \"acc450ee-60c4-4203-9e53-bfc0d0996227\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995105 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9wwp\" (UniqueName: \"kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp\") pod \"acc450ee-60c4-4203-9e53-bfc0d0996227\" (UID: \"acc450ee-60c4-4203-9e53-bfc0d0996227\") " Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995234 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7301bb2b-a968-426c-8c7b-147e84af9d2e" (UID: "7301bb2b-a968-426c-8c7b-147e84af9d2e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995568 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "acc450ee-60c4-4203-9e53-bfc0d0996227" (UID: "acc450ee-60c4-4203-9e53-bfc0d0996227"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995893 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b845761e-5639-47f8-b33e-982f99d9e575-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995912 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/723772d5-ee6f-493e-94f1-7b9804ec1957-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995920 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7301bb2b-a968-426c-8c7b-147e84af9d2e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995929 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd8p9\" (UniqueName: \"kubernetes.io/projected/723772d5-ee6f-493e-94f1-7b9804ec1957-kube-api-access-gd8p9\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995959 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmwsp\" (UniqueName: \"kubernetes.io/projected/a9861636-fbd1-48eb-b179-efaed34ef23a-kube-api-access-pmwsp\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995967 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thqb6\" (UniqueName: \"kubernetes.io/projected/88faddae-4ab0-4bb0-886c-ce747933a8d2-kube-api-access-thqb6\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995978 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88faddae-4ab0-4bb0-886c-ce747933a8d2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995986 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tfjs\" (UniqueName: \"kubernetes.io/projected/b845761e-5639-47f8-b33e-982f99d9e575-kube-api-access-5tfjs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.995994 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9861636-fbd1-48eb-b179-efaed34ef23a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.996002 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acc450ee-60c4-4203-9e53-bfc0d0996227-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.998581 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz" (OuterVolumeSpecName: "kube-api-access-r7psz") pod "7301bb2b-a968-426c-8c7b-147e84af9d2e" (UID: "7301bb2b-a968-426c-8c7b-147e84af9d2e"). InnerVolumeSpecName "kube-api-access-r7psz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:08 crc kubenswrapper[4903]: I1126 22:43:08.998732 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp" (OuterVolumeSpecName: "kube-api-access-g9wwp") pod "acc450ee-60c4-4203-9e53-bfc0d0996227" (UID: "acc450ee-60c4-4203-9e53-bfc0d0996227"). InnerVolumeSpecName "kube-api-access-g9wwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.097825 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7psz\" (UniqueName: \"kubernetes.io/projected/7301bb2b-a968-426c-8c7b-147e84af9d2e-kube-api-access-r7psz\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.097855 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9wwp\" (UniqueName: \"kubernetes.io/projected/acc450ee-60c4-4203-9e53-bfc0d0996227-kube-api-access-g9wwp\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:09 crc kubenswrapper[4903]: W1126 22:43:09.223728 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73028630_97ff_425e_9ac8_1b30f1c834c4.slice/crio-30ad418b7727bec71b8ad5882cf823d3490bd3eb0103711268a9c4f109a75b0b WatchSource:0}: Error finding container 30ad418b7727bec71b8ad5882cf823d3490bd3eb0103711268a9c4f109a75b0b: Status 404 returned error can't find the container with id 30ad418b7727bec71b8ad5882cf823d3490bd3eb0103711268a9c4f109a75b0b Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.230021 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-76df48858c-p4q7x"] Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.823409 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76df48858c-p4q7x" event={"ID":"73028630-97ff-425e-9ac8-1b30f1c834c4","Type":"ContainerStarted","Data":"882467c67e2f116a6a415bf4057eae6ee5f62cdb8222bcd93fb35ea8fc983024"} Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.825006 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76df48858c-p4q7x" event={"ID":"73028630-97ff-425e-9ac8-1b30f1c834c4","Type":"ContainerStarted","Data":"6dc110ba5c27cc491f8d13902f51cdc7b06075de733b4118c7d26cc94f6e48e8"} Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.825074 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-76df48858c-p4q7x" event={"ID":"73028630-97ff-425e-9ac8-1b30f1c834c4","Type":"ContainerStarted","Data":"30ad418b7727bec71b8ad5882cf823d3490bd3eb0103711268a9c4f109a75b0b"} Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.826174 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.826277 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.829713 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f7c7-account-create-update-s7xfc" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.832889 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-6b7jl" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.832918 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0","Type":"ContainerStarted","Data":"e5eca0c4708fa880b30bc3a12b6e9c839189f8d594ad7824dde84cd9195f70f1"} Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.832925 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3d57-account-create-update-kbz2q" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.832890 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-78l4c" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.833019 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xw96b" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.838768 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a856-account-create-update-fr9ds" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.882921 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-76df48858c-p4q7x" podStartSLOduration=5.8829008 podStartE2EDuration="5.8829008s" podCreationTimestamp="2025-11-26 22:43:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:09.868648438 +0000 UTC m=+1318.558883388" watchObservedRunningTime="2025-11-26 22:43:09.8829008 +0000 UTC m=+1318.573135710" Nov 26 22:43:09 crc kubenswrapper[4903]: I1126 22:43:09.927221 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.476053758 podStartE2EDuration="13.927193139s" podCreationTimestamp="2025-11-26 22:42:56 +0000 UTC" firstStartedPulling="2025-11-26 22:42:57.193820175 +0000 UTC m=+1305.884055095" lastFinishedPulling="2025-11-26 22:43:08.644959546 +0000 UTC m=+1317.335194476" observedRunningTime="2025-11-26 22:43:09.890448652 +0000 UTC m=+1318.580683602" watchObservedRunningTime="2025-11-26 22:43:09.927193139 +0000 UTC m=+1318.617428049" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.228041 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230078 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acc450ee-60c4-4203-9e53-bfc0d0996227" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230099 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc450ee-60c4-4203-9e53-bfc0d0996227" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230124 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9861636-fbd1-48eb-b179-efaed34ef23a" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230131 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9861636-fbd1-48eb-b179-efaed34ef23a" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230149 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88faddae-4ab0-4bb0-886c-ce747933a8d2" containerName="mariadb-account-create-update" Nov 26 
22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230156 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="88faddae-4ab0-4bb0-886c-ce747933a8d2" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230166 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b845761e-5639-47f8-b33e-982f99d9e575" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230172 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b845761e-5639-47f8-b33e-982f99d9e575" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230195 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301bb2b-a968-426c-8c7b-147e84af9d2e" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230201 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301bb2b-a968-426c-8c7b-147e84af9d2e" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: E1126 22:43:10.230220 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="723772d5-ee6f-493e-94f1-7b9804ec1957" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230226 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="723772d5-ee6f-493e-94f1-7b9804ec1957" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230669 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9861636-fbd1-48eb-b179-efaed34ef23a" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230728 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="88faddae-4ab0-4bb0-886c-ce747933a8d2" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230748 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="acc450ee-60c4-4203-9e53-bfc0d0996227" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230828 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="723772d5-ee6f-493e-94f1-7b9804ec1957" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230860 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b845761e-5639-47f8-b33e-982f99d9e575" containerName="mariadb-database-create" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.230882 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301bb2b-a968-426c-8c7b-147e84af9d2e" containerName="mariadb-account-create-update" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.237437 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.250357 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.250677 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.250868 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9xvp4" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.257864 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7lpl\" (UniqueName: \"kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.258268 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.258330 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.258356 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.302795 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.361709 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7lpl\" (UniqueName: \"kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.362201 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.362266 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 
22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.362286 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.393857 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.394374 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.400356 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7lpl\" (UniqueName: \"kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.407382 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom\") pod \"heat-engine-79cc4cbd9-r6z9s\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") " pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.408809 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.436664 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.440841 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.456321 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.460043 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.462172 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.504584 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.506739 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.510420 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.514816 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.539670 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.565684 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.565754 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.565792 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.565840 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.565892 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt5rj\" (UniqueName: \"kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.566029 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.566054 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.566097 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.566320 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb866\" (UniqueName: \"kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.566380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.603754 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668116 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668172 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668205 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb866\" (UniqueName: \"kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668236 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668269 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") 
" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668289 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668318 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668351 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668385 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.668407 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.669470 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.670210 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.670253 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.670418 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 
crc kubenswrapper[4903]: I1126 22:43:10.670440 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6fl7\" (UniqueName: \"kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.670529 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt5rj\" (UniqueName: \"kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.672994 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.673045 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.673561 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.675674 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.680396 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.685142 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb866\" (UniqueName: \"kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866\") pod \"heat-cfnapi-bb4f45b8c-tn749\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.686231 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt5rj\" (UniqueName: \"kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj\") pod \"dnsmasq-dns-7756b9d78c-8c58d\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.773005 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.773303 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.773334 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.773366 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6fl7\" (UniqueName: \"kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.777895 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.778787 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.781441 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.800330 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6fl7\" (UniqueName: \"kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7\") pod \"heat-api-7bd5bd57cd-5v7f2\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.806214 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.831903 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:10 crc kubenswrapper[4903]: I1126 22:43:10.848683 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.123941 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.153882 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-sfl5w"] Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.155213 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.162330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-sfl5w"] Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.162923 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.163048 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kl6hf" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.163167 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 22:43:11 crc kubenswrapper[4903]: W1126 22:43:11.171058 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2447923_029d_4799_9adf_bd5fd9d44338.slice/crio-1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d WatchSource:0}: Error finding container 1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d: Status 404 returned error can't find the container with id 1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.295454 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.295548 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.295639 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7znt\" (UniqueName: \"kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.295672 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.369550 
4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:43:11 crc kubenswrapper[4903]: W1126 22:43:11.375145 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae631441_ed7e_48cc_8d5a_6dd39122a07a.slice/crio-58e33f15f9e825696e9e5cef4b484828ae24da85cd08a4bbf9cb54f56b0bf36b WatchSource:0}: Error finding container 58e33f15f9e825696e9e5cef4b484828ae24da85cd08a4bbf9cb54f56b0bf36b: Status 404 returned error can't find the container with id 58e33f15f9e825696e9e5cef4b484828ae24da85cd08a4bbf9cb54f56b0bf36b Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.398731 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.398813 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.398904 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7znt\" (UniqueName: \"kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.398938 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.403703 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.404079 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.407105 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.422174 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-v7znt\" (UniqueName: \"kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt\") pod \"nova-cell0-conductor-db-sync-sfl5w\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.509266 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.685719 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.810911 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:11 crc kubenswrapper[4903]: W1126 22:43:11.827826 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3196fd3_9a53_4b48_95d0_49f5d1e4f5ff.slice/crio-f64feddbe08cea2866cbff3aa1f792fe8fbec7cf448d4862166704b6f63ccd98 WatchSource:0}: Error finding container f64feddbe08cea2866cbff3aa1f792fe8fbec7cf448d4862166704b6f63ccd98: Status 404 returned error can't find the container with id f64feddbe08cea2866cbff3aa1f792fe8fbec7cf448d4862166704b6f63ccd98 Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.918099 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bd5bd57cd-5v7f2" event={"ID":"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff","Type":"ContainerStarted","Data":"f64feddbe08cea2866cbff3aa1f792fe8fbec7cf448d4862166704b6f63ccd98"} Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.975492 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerID="180065f48eebcb22ee86fd9aefe971cd4dc6d64ab8679491f522210e3458e6be" exitCode=0 Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.975595 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerDied","Data":"180065f48eebcb22ee86fd9aefe971cd4dc6d64ab8679491f522210e3458e6be"} Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.975623 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3f11fc1d-817f-4c2a-a777-a4ac0f568b07","Type":"ContainerDied","Data":"5f7e9d96fa81e7cec883018d4f5c4f4378b58bb18f7d2020392a4633f4eb9e73"} Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.975634 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f7e9d96fa81e7cec883018d4f5c4f4378b58bb18f7d2020392a4633f4eb9e73" Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.986413 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79cc4cbd9-r6z9s" event={"ID":"b2447923-029d-4799-9adf-bd5fd9d44338","Type":"ContainerStarted","Data":"f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a"} Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.986454 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79cc4cbd9-r6z9s" event={"ID":"b2447923-029d-4799-9adf-bd5fd9d44338","Type":"ContainerStarted","Data":"1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d"} Nov 26 22:43:11 crc kubenswrapper[4903]: I1126 22:43:11.989759 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 
22:43:12.001083 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.025245 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerStarted","Data":"ee15bc0b8c8ea891f2f46747cac633aac5d252954cbdee237ebe4f1fd7f3ccf2"} Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.025301 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerStarted","Data":"58e33f15f9e825696e9e5cef4b484828ae24da85cd08a4bbf9cb54f56b0bf36b"} Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.032555 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-79cc4cbd9-r6z9s" podStartSLOduration=2.032537749 podStartE2EDuration="2.032537749s" podCreationTimestamp="2025-11-26 22:43:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:12.017807964 +0000 UTC m=+1320.708042884" watchObservedRunningTime="2025-11-26 22:43:12.032537749 +0000 UTC m=+1320.722772659" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.114200 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" event={"ID":"6c8e8a58-424c-4199-82a1-4597cbab5011","Type":"ContainerStarted","Data":"ea355f9b610b21cad27283ed6a0b36a0b983c591b6ffb1aab151e095f442818e"} Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.130998 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131034 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131071 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x9j4\" (UniqueName: \"kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131286 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131333 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-log-httpd\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131452 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.131498 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts\") pod \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\" (UID: \"3f11fc1d-817f-4c2a-a777-a4ac0f568b07\") " Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.139557 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.142793 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4" (OuterVolumeSpecName: "kube-api-access-9x9j4") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "kube-api-access-9x9j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.144072 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts" (OuterVolumeSpecName: "scripts") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.151807 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.189475 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.237604 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.237634 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x9j4\" (UniqueName: \"kubernetes.io/projected/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-kube-api-access-9x9j4\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.237645 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.237653 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.237662 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.254597 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.294103 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data" (OuterVolumeSpecName: "config-data") pod "3f11fc1d-817f-4c2a-a777-a4ac0f568b07" (UID: "3f11fc1d-817f-4c2a-a777-a4ac0f568b07"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.322227 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-sfl5w"] Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.340362 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:12 crc kubenswrapper[4903]: I1126 22:43:12.340390 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f11fc1d-817f-4c2a-a777-a4ac0f568b07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.071631 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" event={"ID":"6573f0cf-2bad-494e-8288-1c68c4326edb","Type":"ContainerStarted","Data":"239e38403d587306c19bc4055464046046c608f376353f2affec3d30e4040675"} Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.077421 4903 generic.go:334] "Generic (PLEG): container finished" podID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerID="ee15bc0b8c8ea891f2f46747cac633aac5d252954cbdee237ebe4f1fd7f3ccf2" exitCode=0 Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.077989 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerDied","Data":"ee15bc0b8c8ea891f2f46747cac633aac5d252954cbdee237ebe4f1fd7f3ccf2"} Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.078040 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerStarted","Data":"a7ed7eca4d9ca7cc58709f9238343879b90c2ff5e7accb43d5a1675456d28b4f"} Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.078241 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.078117 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.103588 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" podStartSLOduration=3.103569707 podStartE2EDuration="3.103569707s" podCreationTimestamp="2025-11-26 22:43:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:13.099019005 +0000 UTC m=+1321.789253915" watchObservedRunningTime="2025-11-26 22:43:13.103569707 +0000 UTC m=+1321.793804617" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.134430 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.151757 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163023 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:13 crc kubenswrapper[4903]: E1126 22:43:13.163490 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163501 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" Nov 26 22:43:13 crc kubenswrapper[4903]: E1126 22:43:13.163519 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="sg-core" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163526 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="sg-core" Nov 26 22:43:13 crc kubenswrapper[4903]: E1126 22:43:13.163551 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-central-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163557 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-central-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: E1126 22:43:13.163590 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-notification-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163596 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-notification-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163821 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-notification-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163834 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="sg-core" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163856 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.163865 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="ceilometer-central-agent" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 
22:43:13.165820 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.173088 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.181398 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.186052 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266155 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266225 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266286 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266365 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266411 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266425 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.266454 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx728\" (UniqueName: \"kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368308 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " 
pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368392 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368413 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368449 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx728\" (UniqueName: \"kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368482 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368525 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.368584 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.369005 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.369857 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.382901 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.383208 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 
22:43:13.384085 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.386598 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.386795 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx728\" (UniqueName: \"kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728\") pod \"ceilometer-0\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.503109 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:13 crc kubenswrapper[4903]: I1126 22:43:13.666600 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:14 crc kubenswrapper[4903]: I1126 22:43:14.048643 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" path="/var/lib/kubelet/pods/3f11fc1d-817f-4c2a-a777-a4ac0f568b07/volumes" Nov 26 22:43:15 crc kubenswrapper[4903]: I1126 22:43:15.222495 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:15 crc kubenswrapper[4903]: I1126 22:43:15.224756 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-76df48858c-p4q7x" Nov 26 22:43:16 crc kubenswrapper[4903]: I1126 22:43:16.116312 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" event={"ID":"6c8e8a58-424c-4199-82a1-4597cbab5011","Type":"ContainerStarted","Data":"acb60bf5e63d936e13293990572f3d662fd16919da1f2762232116a3c3474d25"} Nov 26 22:43:16 crc kubenswrapper[4903]: I1126 22:43:16.121974 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:16 crc kubenswrapper[4903]: I1126 22:43:16.140064 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" podStartSLOduration=2.208959033 podStartE2EDuration="6.140046254s" podCreationTimestamp="2025-11-26 22:43:10 +0000 UTC" firstStartedPulling="2025-11-26 22:43:11.706889345 +0000 UTC m=+1320.397124255" lastFinishedPulling="2025-11-26 22:43:15.637976566 +0000 UTC m=+1324.328211476" observedRunningTime="2025-11-26 22:43:16.133154959 +0000 UTC m=+1324.823389869" watchObservedRunningTime="2025-11-26 22:43:16.140046254 +0000 UTC m=+1324.830281164" Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.163584 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerStarted","Data":"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd"} Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.164039 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerStarted","Data":"4c3b4bef97fea08e779606dffcedfc739621d3d498d504f303519689fb0ba9ad"} Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.178480 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bd5bd57cd-5v7f2" event={"ID":"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff","Type":"ContainerStarted","Data":"76ceb5cebe4ba7090e693e7bbcd1a156bc217b79d50b11089ba34a53bcf50f7a"} Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.178554 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.178717 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:17 crc kubenswrapper[4903]: I1126 22:43:17.212050 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7bd5bd57cd-5v7f2" podStartSLOduration=3.437898956 podStartE2EDuration="7.212030777s" podCreationTimestamp="2025-11-26 22:43:10 +0000 UTC" firstStartedPulling="2025-11-26 22:43:11.863852655 +0000 UTC m=+1320.554087565" lastFinishedPulling="2025-11-26 22:43:15.637984476 +0000 UTC m=+1324.328219386" observedRunningTime="2025-11-26 22:43:17.196828419 +0000 UTC m=+1325.887063329" watchObservedRunningTime="2025-11-26 22:43:17.212030777 +0000 UTC m=+1325.902265687" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.389838 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.391509 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.399195 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.474782 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.476778 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.491223 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.502020 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.503398 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505203 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505311 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505329 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505345 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpxrr\" (UniqueName: \"kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505381 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505401 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvnkl\" (UniqueName: \"kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505423 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.505452 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.528472 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606800 
4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606839 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606857 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpxrr\" (UniqueName: \"kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606902 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606923 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvnkl\" (UniqueName: \"kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606946 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.606978 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.607003 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.607022 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm94d\" (UniqueName: \"kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 
22:43:18.607073 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.607120 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.607662 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.612570 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.612754 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.613384 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.613386 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.616577 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.623799 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.635842 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bpxrr\" (UniqueName: \"kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr\") pod \"heat-engine-6c48d846b-6vkx8\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") " pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.635823 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvnkl\" (UniqueName: \"kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl\") pod \"heat-api-8c659cc69-lgxnn\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.710034 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.710112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm94d\" (UniqueName: \"kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.710238 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.710350 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.715429 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.716121 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.716975 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.730170 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm94d\" (UniqueName: 
\"kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d\") pod \"heat-cfnapi-7777cd5989-t54l6\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.739921 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.799396 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:18 crc kubenswrapper[4903]: I1126 22:43:18.826127 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.731264 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.731465 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-7bd5bd57cd-5v7f2" podUID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" containerName="heat-api" containerID="cri-o://76ceb5cebe4ba7090e693e7bbcd1a156bc217b79d50b11089ba34a53bcf50f7a" gracePeriod=60 Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.744731 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.744975 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" podUID="6c8e8a58-424c-4199-82a1-4597cbab5011" containerName="heat-cfnapi" containerID="cri-o://acb60bf5e63d936e13293990572f3d662fd16919da1f2762232116a3c3474d25" gracePeriod=60 Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.780517 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.783435 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.790062 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.790475 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.811617 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839454 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839607 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839629 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839656 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839680 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw7rr\" (UniqueName: \"kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.839756 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.847873 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.849553 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.860441 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.861122 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.878202 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"] Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.945519 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.945872 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.945902 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.945938 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946037 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946130 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946184 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw7rr\" (UniqueName: \"kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946747 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946786 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946802 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw5mb\" (UniqueName: \"kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.946959 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.947068 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.951666 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.952319 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.952791 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.953859 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.954209 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:19 crc kubenswrapper[4903]: I1126 22:43:19.963574 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw7rr\" (UniqueName: \"kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr\") pod \"heat-api-67fbd86d69-b26tj\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") " pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049108 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049155 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049173 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw5mb\" (UniqueName: \"kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049309 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049345 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.049370 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.054434 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.054670 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.054955 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.055121 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.055336 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.067091 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw5mb\" (UniqueName: \"kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb\") pod \"heat-cfnapi-6fff56d55f-8bbzh\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") " pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.144057 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.195812 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.210454 4903 generic.go:334] "Generic (PLEG): container finished" podID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" containerID="76ceb5cebe4ba7090e693e7bbcd1a156bc217b79d50b11089ba34a53bcf50f7a" exitCode=0 Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.210512 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bd5bd57cd-5v7f2" event={"ID":"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff","Type":"ContainerDied","Data":"76ceb5cebe4ba7090e693e7bbcd1a156bc217b79d50b11089ba34a53bcf50f7a"} Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.212837 4903 generic.go:334] "Generic (PLEG): container finished" podID="6c8e8a58-424c-4199-82a1-4597cbab5011" containerID="acb60bf5e63d936e13293990572f3d662fd16919da1f2762232116a3c3474d25" exitCode=0 Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.212878 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" event={"ID":"6c8e8a58-424c-4199-82a1-4597cbab5011","Type":"ContainerDied","Data":"acb60bf5e63d936e13293990572f3d662fd16919da1f2762232116a3c3474d25"} Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.808446 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.904834 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"] Nov 26 22:43:20 crc kubenswrapper[4903]: I1126 22:43:20.905487 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="dnsmasq-dns" containerID="cri-o://6f42106ab7746f6bff7560cd467fbcd734f1a3712d4510b0ba83270d697f91b3" gracePeriod=10 Nov 26 22:43:21 crc kubenswrapper[4903]: I1126 22:43:21.234322 4903 generic.go:334] "Generic (PLEG): container finished" podID="a4997378-3318-4e89-8210-5bfefe2d9467" containerID="6f42106ab7746f6bff7560cd467fbcd734f1a3712d4510b0ba83270d697f91b3" exitCode=0 Nov 26 22:43:21 crc kubenswrapper[4903]: I1126 22:43:21.234372 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" event={"ID":"a4997378-3318-4e89-8210-5bfefe2d9467","Type":"ContainerDied","Data":"6f42106ab7746f6bff7560cd467fbcd734f1a3712d4510b0ba83270d697f91b3"} Nov 26 22:43:22 crc kubenswrapper[4903]: I1126 22:43:22.514048 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.199:5353: connect: connection refused" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.065562 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.150801 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.151021 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.151098 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k8tj\" (UniqueName: \"kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.151121 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.151235 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.151262 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config\") pod \"a4997378-3318-4e89-8210-5bfefe2d9467\" (UID: \"a4997378-3318-4e89-8210-5bfefe2d9467\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.171030 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj" (OuterVolumeSpecName: "kube-api-access-5k8tj") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "kube-api-access-5k8tj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.253625 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k8tj\" (UniqueName: \"kubernetes.io/projected/a4997378-3318-4e89-8210-5bfefe2d9467-kube-api-access-5k8tj\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.261246 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.286088 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config" (OuterVolumeSpecName: "config") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.286124 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.287891 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" event={"ID":"6573f0cf-2bad-494e-8288-1c68c4326edb","Type":"ContainerStarted","Data":"277db87d3f7eed539d07fcdf860ff73245de3492f776f6db8aab98b2df719493"} Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.290085 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.290426 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.299619 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerStarted","Data":"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326"} Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.307210 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.309559 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" event={"ID":"a4997378-3318-4e89-8210-5bfefe2d9467","Type":"ContainerDied","Data":"628890ae32a188788ab49bef6d25e27a5d8f559fdadbf18eaa70ead3f43c3f8f"} Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.309615 4903 scope.go:117] "RemoveContainer" containerID="6f42106ab7746f6bff7560cd467fbcd734f1a3712d4510b0ba83270d697f91b3" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.309823 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-2z8db" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.310065 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" podStartSLOduration=1.836398661 podStartE2EDuration="13.310056045s" podCreationTimestamp="2025-11-26 22:43:11 +0000 UTC" firstStartedPulling="2025-11-26 22:43:12.314183164 +0000 UTC m=+1321.004418074" lastFinishedPulling="2025-11-26 22:43:23.787840548 +0000 UTC m=+1332.478075458" observedRunningTime="2025-11-26 22:43:24.304210898 +0000 UTC m=+1332.994445808" watchObservedRunningTime="2025-11-26 22:43:24.310056045 +0000 UTC m=+1333.000290955" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.315325 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a4997378-3318-4e89-8210-5bfefe2d9467" (UID: "a4997378-3318-4e89-8210-5bfefe2d9467"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.319615 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" event={"ID":"6c8e8a58-424c-4199-82a1-4597cbab5011","Type":"ContainerDied","Data":"ea355f9b610b21cad27283ed6a0b36a0b983c591b6ffb1aab151e095f442818e"} Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.319730 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-bb4f45b8c-tn749" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.331894 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7bd5bd57cd-5v7f2" event={"ID":"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff","Type":"ContainerDied","Data":"f64feddbe08cea2866cbff3aa1f792fe8fbec7cf448d4862166704b6f63ccd98"} Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.331982 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-7bd5bd57cd-5v7f2" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363522 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data\") pod \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363581 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle\") pod \"6c8e8a58-424c-4199-82a1-4597cbab5011\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363653 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom\") pod \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363775 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data\") pod \"6c8e8a58-424c-4199-82a1-4597cbab5011\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363824 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kb866\" (UniqueName: \"kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866\") pod \"6c8e8a58-424c-4199-82a1-4597cbab5011\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363845 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom\") pod \"6c8e8a58-424c-4199-82a1-4597cbab5011\" (UID: \"6c8e8a58-424c-4199-82a1-4597cbab5011\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363920 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6fl7\" (UniqueName: \"kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7\") pod \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.363944 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle\") pod \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\" (UID: \"a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff\") " Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.364483 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.364503 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.364513 4903 
reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.364522 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.364530 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a4997378-3318-4e89-8210-5bfefe2d9467-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.373767 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6c8e8a58-424c-4199-82a1-4597cbab5011" (UID: "6c8e8a58-424c-4199-82a1-4597cbab5011"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.377181 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" (UID: "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.377314 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866" (OuterVolumeSpecName: "kube-api-access-kb866") pod "6c8e8a58-424c-4199-82a1-4597cbab5011" (UID: "6c8e8a58-424c-4199-82a1-4597cbab5011"). InnerVolumeSpecName "kube-api-access-kb866". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.395081 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7" (OuterVolumeSpecName: "kube-api-access-p6fl7") pod "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" (UID: "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff"). InnerVolumeSpecName "kube-api-access-p6fl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.402046 4903 scope.go:117] "RemoveContainer" containerID="a44e6fc0addd81184eca5edcd4afc3e86d08d7c469ecfef8ed674f2626094baa" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.454203 4903 scope.go:117] "RemoveContainer" containerID="acb60bf5e63d936e13293990572f3d662fd16919da1f2762232116a3c3474d25" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.458806 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" (UID: "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.466232 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.466266 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kb866\" (UniqueName: \"kubernetes.io/projected/6c8e8a58-424c-4199-82a1-4597cbab5011-kube-api-access-kb866\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.466276 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.466284 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6fl7\" (UniqueName: \"kubernetes.io/projected/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-kube-api-access-p6fl7\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.466293 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.481858 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c8e8a58-424c-4199-82a1-4597cbab5011" (UID: "6c8e8a58-424c-4199-82a1-4597cbab5011"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.486655 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data" (OuterVolumeSpecName: "config-data") pod "6c8e8a58-424c-4199-82a1-4597cbab5011" (UID: "6c8e8a58-424c-4199-82a1-4597cbab5011"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.493899 4903 scope.go:117] "RemoveContainer" containerID="76ceb5cebe4ba7090e693e7bbcd1a156bc217b79d50b11089ba34a53bcf50f7a" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.511292 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data" (OuterVolumeSpecName: "config-data") pod "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" (UID: "a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.568057 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.568091 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.568100 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c8e8a58-424c-4199-82a1-4597cbab5011-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:24 crc kubenswrapper[4903]: W1126 22:43:24.762327 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ee458b7_00d7_40e4_8c43_8c61e6fb87ae.slice/crio-499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152 WatchSource:0}: Error finding container 499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152: Status 404 returned error can't find the container with id 499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152 Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.763710 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.798737 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.812637 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.825451 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-2z8db"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.846569 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.863019 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.872990 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.883649 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.892441 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-bb4f45b8c-tn749"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.901256 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.901503 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-log" containerID="cri-o://2a0d4da81420be605fab4e1c6e2df4d96cf065623347e9f2136de15a180c6a27" gracePeriod=30 Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.901987 4903 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/glance-default-external-api-0" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-httpd" containerID="cri-o://0a32b28d121888b53d1f6c7cecf250676da76bd12f79d54eb1bc8f0ab2de18cd" gracePeriod=30 Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.949604 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:24 crc kubenswrapper[4903]: I1126 22:43:24.967264 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-7bd5bd57cd-5v7f2"] Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.347081 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" event={"ID":"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae","Type":"ContainerStarted","Data":"3b8719ae15f3ee09a83474358e9125d061ea8ead25fecc0051331bfdb2122077"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.347360 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" event={"ID":"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae","Type":"ContainerStarted","Data":"499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.347477 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.354910 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c48d846b-6vkx8" event={"ID":"ec7ca594-63f3-463d-992d-75a2bec894dc","Type":"ContainerStarted","Data":"99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.354951 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c48d846b-6vkx8" event={"ID":"ec7ca594-63f3-463d-992d-75a2bec894dc","Type":"ContainerStarted","Data":"aa8a5802ee424018214497d59b00733d1682229188c99f9641731526a3175e49"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.355906 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.360544 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-67fbd86d69-b26tj" event={"ID":"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a","Type":"ContainerStarted","Data":"4dc2b6467314c282d106d8006f59bdb9a904b3c666d77a0350bad9cc7de820c1"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.360587 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-67fbd86d69-b26tj" event={"ID":"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a","Type":"ContainerStarted","Data":"17be9b44e761978010c65e501d3be4ad4ca59f7b3f85ed15a583a1d8a9347795"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.361549 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.373860 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" podStartSLOduration=6.373840539 podStartE2EDuration="6.373840539s" podCreationTimestamp="2025-11-26 22:43:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:25.365906825 +0000 UTC m=+1334.056141735" watchObservedRunningTime="2025-11-26 22:43:25.373840539 +0000 UTC m=+1334.064075449" Nov 26 22:43:25 crc 
kubenswrapper[4903]: I1126 22:43:25.385027 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c659cc69-lgxnn" event={"ID":"3e2b20bd-f20a-485d-9020-b09ec0084c13","Type":"ContainerStarted","Data":"9b9c2fe9e5ee7222a6476dd28cba72ce17603f8a11069b92ed32d37f388ef84e"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.385073 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c659cc69-lgxnn" event={"ID":"3e2b20bd-f20a-485d-9020-b09ec0084c13","Type":"ContainerStarted","Data":"3f32f9aa0621e2dd61ac9dc9db2852783633f9dd71526a70670828c631e93bc7"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.385117 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.397335 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6c48d846b-6vkx8" podStartSLOduration=7.397312488 podStartE2EDuration="7.397312488s" podCreationTimestamp="2025-11-26 22:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:25.381869434 +0000 UTC m=+1334.072104344" watchObservedRunningTime="2025-11-26 22:43:25.397312488 +0000 UTC m=+1334.087547398" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.409624 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerStarted","Data":"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.419373 4903 generic.go:334] "Generic (PLEG): container finished" podID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerID="2a0d4da81420be605fab4e1c6e2df4d96cf065623347e9f2136de15a180c6a27" exitCode=143 Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.419637 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerDied","Data":"2a0d4da81420be605fab4e1c6e2df4d96cf065623347e9f2136de15a180c6a27"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.424311 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7777cd5989-t54l6" event={"ID":"1d40a43f-69f3-4468-acbc-f9730dde2cd3","Type":"ContainerStarted","Data":"585000be8d5e6bb241602fb3bcb0e0b8f17407b63d3d1c7631a21ff32d18478d"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.424452 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7777cd5989-t54l6" event={"ID":"1d40a43f-69f3-4468-acbc-f9730dde2cd3","Type":"ContainerStarted","Data":"68e8ab7fc5ef91836d34a8d8dcdc575ab5ceec782da27ee464207e5489b64a49"} Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.425821 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.426762 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-67fbd86d69-b26tj" podStartSLOduration=6.426742608 podStartE2EDuration="6.426742608s" podCreationTimestamp="2025-11-26 22:43:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:25.398065938 +0000 UTC m=+1334.088300848" watchObservedRunningTime="2025-11-26 
22:43:25.426742608 +0000 UTC m=+1334.116977518" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.437375 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-8c659cc69-lgxnn" podStartSLOduration=7.437356322 podStartE2EDuration="7.437356322s" podCreationTimestamp="2025-11-26 22:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:25.416050241 +0000 UTC m=+1334.106285151" watchObservedRunningTime="2025-11-26 22:43:25.437356322 +0000 UTC m=+1334.127591232" Nov 26 22:43:25 crc kubenswrapper[4903]: I1126 22:43:25.449127 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7777cd5989-t54l6" podStartSLOduration=7.449107008 podStartE2EDuration="7.449107008s" podCreationTimestamp="2025-11-26 22:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:25.445192143 +0000 UTC m=+1334.135427053" watchObservedRunningTime="2025-11-26 22:43:25.449107008 +0000 UTC m=+1334.139341918" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.041525 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c8e8a58-424c-4199-82a1-4597cbab5011" path="/var/lib/kubelet/pods/6c8e8a58-424c-4199-82a1-4597cbab5011/volumes" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.042285 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" path="/var/lib/kubelet/pods/a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff/volumes" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.042787 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" path="/var/lib/kubelet/pods/a4997378-3318-4e89-8210-5bfefe2d9467/volumes" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.452345 4903 generic.go:334] "Generic (PLEG): container finished" podID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerID="585000be8d5e6bb241602fb3bcb0e0b8f17407b63d3d1c7631a21ff32d18478d" exitCode=1 Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.452481 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7777cd5989-t54l6" event={"ID":"1d40a43f-69f3-4468-acbc-f9730dde2cd3","Type":"ContainerDied","Data":"585000be8d5e6bb241602fb3bcb0e0b8f17407b63d3d1c7631a21ff32d18478d"} Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.453190 4903 scope.go:117] "RemoveContainer" containerID="585000be8d5e6bb241602fb3bcb0e0b8f17407b63d3d1c7631a21ff32d18478d" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.455069 4903 generic.go:334] "Generic (PLEG): container finished" podID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerID="9b9c2fe9e5ee7222a6476dd28cba72ce17603f8a11069b92ed32d37f388ef84e" exitCode=1 Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.456490 4903 scope.go:117] "RemoveContainer" containerID="9b9c2fe9e5ee7222a6476dd28cba72ce17603f8a11069b92ed32d37f388ef84e" Nov 26 22:43:26 crc kubenswrapper[4903]: I1126 22:43:26.456618 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c659cc69-lgxnn" event={"ID":"3e2b20bd-f20a-485d-9020-b09ec0084c13","Type":"ContainerDied","Data":"9b9c2fe9e5ee7222a6476dd28cba72ce17603f8a11069b92ed32d37f388ef84e"} Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469071 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerStarted","Data":"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774"} Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469158 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-central-agent" containerID="cri-o://11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd" gracePeriod=30 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469214 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="proxy-httpd" containerID="cri-o://2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774" gracePeriod=30 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469244 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="sg-core" containerID="cri-o://3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb" gracePeriod=30 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469308 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-notification-agent" containerID="cri-o://c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326" gracePeriod=30 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.469535 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.474637 4903 generic.go:334] "Generic (PLEG): container finished" podID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" exitCode=1 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.474679 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7777cd5989-t54l6" event={"ID":"1d40a43f-69f3-4468-acbc-f9730dde2cd3","Type":"ContainerDied","Data":"04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef"} Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.474754 4903 scope.go:117] "RemoveContainer" containerID="585000be8d5e6bb241602fb3bcb0e0b8f17407b63d3d1c7631a21ff32d18478d" Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.475604 4903 scope.go:117] "RemoveContainer" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" Nov 26 22:43:27 crc kubenswrapper[4903]: E1126 22:43:27.476227 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7777cd5989-t54l6_openstack(1d40a43f-69f3-4468-acbc-f9730dde2cd3)\"" pod="openstack/heat-cfnapi-7777cd5989-t54l6" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.486922 4903 generic.go:334] "Generic (PLEG): container finished" podID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerID="9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f" exitCode=1 Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.488605 4903 scope.go:117] "RemoveContainer" containerID="9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f" Nov 26 22:43:27 crc 
kubenswrapper[4903]: E1126 22:43:27.488942 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-8c659cc69-lgxnn_openstack(3e2b20bd-f20a-485d-9020-b09ec0084c13)\"" pod="openstack/heat-api-8c659cc69-lgxnn" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.488957 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c659cc69-lgxnn" event={"ID":"3e2b20bd-f20a-485d-9020-b09ec0084c13","Type":"ContainerDied","Data":"9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f"} Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.508602 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.652828468 podStartE2EDuration="14.508585938s" podCreationTimestamp="2025-11-26 22:43:13 +0000 UTC" firstStartedPulling="2025-11-26 22:43:16.131510785 +0000 UTC m=+1324.821745695" lastFinishedPulling="2025-11-26 22:43:26.987268255 +0000 UTC m=+1335.677503165" observedRunningTime="2025-11-26 22:43:27.497563623 +0000 UTC m=+1336.187798543" watchObservedRunningTime="2025-11-26 22:43:27.508585938 +0000 UTC m=+1336.198820848" Nov 26 22:43:27 crc kubenswrapper[4903]: I1126 22:43:27.553925 4903 scope.go:117] "RemoveContainer" containerID="9b9c2fe9e5ee7222a6476dd28cba72ce17603f8a11069b92ed32d37f388ef84e" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.001628 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.001914 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-log" containerID="cri-o://8505d04695a9d0c561bd090e0fc5fde3adca7845b5084012c0e05e5fd474d197" gracePeriod=30 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.002147 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-httpd" containerID="cri-o://f51c317ace15fdc8c4f6bc5041bb57e433115de30aa770ec064ccecbb29c7035" gracePeriod=30 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.504782 4903 scope.go:117] "RemoveContainer" containerID="9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f" Nov 26 22:43:28 crc kubenswrapper[4903]: E1126 22:43:28.505253 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-8c659cc69-lgxnn_openstack(3e2b20bd-f20a-485d-9020-b09ec0084c13)\"" pod="openstack/heat-api-8c659cc69-lgxnn" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.514932 4903 generic.go:334] "Generic (PLEG): container finished" podID="306f5561-d47c-4e76-a4ab-5320d327954d" containerID="8505d04695a9d0c561bd090e0fc5fde3adca7845b5084012c0e05e5fd474d197" exitCode=143 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.515003 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerDied","Data":"8505d04695a9d0c561bd090e0fc5fde3adca7845b5084012c0e05e5fd474d197"} Nov 26 22:43:28 
crc kubenswrapper[4903]: I1126 22:43:28.528521 4903 generic.go:334] "Generic (PLEG): container finished" podID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerID="3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb" exitCode=2 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.528548 4903 generic.go:334] "Generic (PLEG): container finished" podID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerID="c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326" exitCode=0 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.528575 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerDied","Data":"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb"} Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.528638 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerDied","Data":"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326"} Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.528657 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerDied","Data":"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd"} Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.528601 4903 generic.go:334] "Generic (PLEG): container finished" podID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerID="11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd" exitCode=0 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.532620 4903 generic.go:334] "Generic (PLEG): container finished" podID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerID="0a32b28d121888b53d1f6c7cecf250676da76bd12f79d54eb1bc8f0ab2de18cd" exitCode=0 Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.532685 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerDied","Data":"0a32b28d121888b53d1f6c7cecf250676da76bd12f79d54eb1bc8f0ab2de18cd"} Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.537714 4903 scope.go:117] "RemoveContainer" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" Nov 26 22:43:28 crc kubenswrapper[4903]: E1126 22:43:28.537928 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7777cd5989-t54l6_openstack(1d40a43f-69f3-4468-acbc-f9730dde2cd3)\"" pod="openstack/heat-cfnapi-7777cd5989-t54l6" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.708884 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779382 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779722 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779746 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779818 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779843 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779901 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.779924 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ff6z\" (UniqueName: \"kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.780005 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\" (UID: \"635b390e-4ef6-41fb-a81d-cf4f819d2b66\") " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.780271 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.780530 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs" (OuterVolumeSpecName: "logs") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.781003 4903 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.781022 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/635b390e-4ef6-41fb-a81d-cf4f819d2b66-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.787498 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.788421 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts" (OuterVolumeSpecName: "scripts") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.791757 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z" (OuterVolumeSpecName: "kube-api-access-8ff6z") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "kube-api-access-8ff6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.800497 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.800537 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.826778 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.826830 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.837876 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.866804 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data" (OuterVolumeSpecName: "config-data") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.885252 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ff6z\" (UniqueName: \"kubernetes.io/projected/635b390e-4ef6-41fb-a81d-cf4f819d2b66-kube-api-access-8ff6z\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.885288 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.885303 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.885312 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.885322 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.886419 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "635b390e-4ef6-41fb-a81d-cf4f819d2b66" (UID: "635b390e-4ef6-41fb-a81d-cf4f819d2b66"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.926407 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.987143 4903 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/635b390e-4ef6-41fb-a81d-cf4f819d2b66-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:28 crc kubenswrapper[4903]: I1126 22:43:28.987170 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.548375 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"635b390e-4ef6-41fb-a81d-cf4f819d2b66","Type":"ContainerDied","Data":"2bbdefa8bf7f14be7a999a70047d42e5205cf1cc221381e75199313cf2c2be79"} Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.548422 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.548434 4903 scope.go:117] "RemoveContainer" containerID="0a32b28d121888b53d1f6c7cecf250676da76bd12f79d54eb1bc8f0ab2de18cd" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.549223 4903 scope.go:117] "RemoveContainer" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.549283 4903 scope.go:117] "RemoveContainer" containerID="9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.549598 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7777cd5989-t54l6_openstack(1d40a43f-69f3-4468-acbc-f9730dde2cd3)\"" pod="openstack/heat-cfnapi-7777cd5989-t54l6" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.549603 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-8c659cc69-lgxnn_openstack(3e2b20bd-f20a-485d-9020-b09ec0084c13)\"" pod="openstack/heat-api-8c659cc69-lgxnn" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.586881 4903 scope.go:117] "RemoveContainer" containerID="2a0d4da81420be605fab4e1c6e2df4d96cf065623347e9f2136de15a180c6a27" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.677603 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.693455 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.705501 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.705971 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c8e8a58-424c-4199-82a1-4597cbab5011" containerName="heat-cfnapi" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.705989 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c8e8a58-424c-4199-82a1-4597cbab5011" containerName="heat-cfnapi" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.706006 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-log" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706013 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-log" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.706041 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="dnsmasq-dns" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706048 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="dnsmasq-dns" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.706063 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-httpd" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706070 4903 
state_mem.go:107] "Deleted CPUSet assignment" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-httpd" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.706079 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" containerName="heat-api" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706084 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" containerName="heat-api" Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.706100 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="init" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706107 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="init" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706320 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-httpd" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706330 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3196fd3-9a53-4b48-95d0-49f5d1e4f5ff" containerName="heat-api" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706341 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4997378-3318-4e89-8210-5bfefe2d9467" containerName="dnsmasq-dns" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706351 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c8e8a58-424c-4199-82a1-4597cbab5011" containerName="heat-cfnapi" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.706367 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" containerName="glance-log" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.707531 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.709324 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.709722 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.720610 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:29 crc kubenswrapper[4903]: E1126 22:43:29.817165 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod635b390e_4ef6_41fb_a81d_cf4f819d2b66.slice/crio-2bbdefa8bf7f14be7a999a70047d42e5205cf1cc221381e75199313cf2c2be79\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod635b390e_4ef6_41fb_a81d_cf4f819d2b66.slice\": RecentStats: unable to find data in memory cache]" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828266 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828436 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-572bc\" (UniqueName: \"kubernetes.io/projected/e9527a05-6356-4ee8-8e07-5557453ad8c2-kube-api-access-572bc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828510 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828541 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-logs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828566 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828679 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " 
pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828720 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.828766 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930581 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-572bc\" (UniqueName: \"kubernetes.io/projected/e9527a05-6356-4ee8-8e07-5557453ad8c2-kube-api-access-572bc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930660 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930685 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-logs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930721 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930772 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930791 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930818 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" 
Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.930843 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.931241 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.931551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.931952 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9527a05-6356-4ee8-8e07-5557453ad8c2-logs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.938462 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-config-data\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.939224 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.939404 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.955734 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9527a05-6356-4ee8-8e07-5557453ad8c2-scripts\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.958375 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-572bc\" (UniqueName: \"kubernetes.io/projected/e9527a05-6356-4ee8-8e07-5557453ad8c2-kube-api-access-572bc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:29 crc kubenswrapper[4903]: I1126 22:43:29.971979 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"e9527a05-6356-4ee8-8e07-5557453ad8c2\") " pod="openstack/glance-default-external-api-0" Nov 26 22:43:30 crc kubenswrapper[4903]: I1126 22:43:30.023582 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 22:43:30 crc kubenswrapper[4903]: I1126 22:43:30.046935 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="635b390e-4ef6-41fb-a81d-cf4f819d2b66" path="/var/lib/kubelet/pods/635b390e-4ef6-41fb-a81d-cf4f819d2b66/volumes" Nov 26 22:43:30 crc kubenswrapper[4903]: I1126 22:43:30.561099 4903 scope.go:117] "RemoveContainer" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" Nov 26 22:43:30 crc kubenswrapper[4903]: E1126 22:43:30.561723 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7777cd5989-t54l6_openstack(1d40a43f-69f3-4468-acbc-f9730dde2cd3)\"" pod="openstack/heat-cfnapi-7777cd5989-t54l6" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" Nov 26 22:43:30 crc kubenswrapper[4903]: I1126 22:43:30.641269 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:30 crc kubenswrapper[4903]: I1126 22:43:30.664434 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.618248 4903 generic.go:334] "Generic (PLEG): container finished" podID="306f5561-d47c-4e76-a4ab-5320d327954d" containerID="f51c317ace15fdc8c4f6bc5041bb57e433115de30aa770ec064ccecbb29c7035" exitCode=0 Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.618601 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerDied","Data":"f51c317ace15fdc8c4f6bc5041bb57e433115de30aa770ec064ccecbb29c7035"} Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.620515 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9527a05-6356-4ee8-8e07-5557453ad8c2","Type":"ContainerStarted","Data":"f12c84137ee537c585d5e7ce8426d9397f18e560f313ffd288f27733021f5830"} Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.620573 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9527a05-6356-4ee8-8e07-5557453ad8c2","Type":"ContainerStarted","Data":"7b21bab4d04c6240cbd8f6bcb05b6a521145143cd1578eb479fc738917ec2dd7"} Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.737047 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.778524 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-67fbd86d69-b26tj" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.804619 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.804701 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9qxb\" (UniqueName: \"kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.804854 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.805057 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.805083 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.805104 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.805135 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.805161 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts\") pod \"306f5561-d47c-4e76-a4ab-5320d327954d\" (UID: \"306f5561-d47c-4e76-a4ab-5320d327954d\") " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.806109 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs" (OuterVolumeSpecName: "logs") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.806389 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.827543 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.827647 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.828056 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts" (OuterVolumeSpecName: "scripts") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.864066 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb" (OuterVolumeSpecName: "kube-api-access-n9qxb") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "kube-api-access-n9qxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.869881 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.923733 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.930037 4903 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/306f5561-d47c-4e76-a4ab-5320d327954d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.930061 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.930085 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.930095 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9qxb\" (UniqueName: \"kubernetes.io/projected/306f5561-d47c-4e76-a4ab-5320d327954d-kube-api-access-n9qxb\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.930114 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.940993 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data" (OuterVolumeSpecName: "config-data") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.960137 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "306f5561-d47c-4e76-a4ab-5320d327954d" (UID: "306f5561-d47c-4e76-a4ab-5320d327954d"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:31 crc kubenswrapper[4903]: I1126 22:43:31.985600 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.014076 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.034095 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.034133 4903 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.034147 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/306f5561-d47c-4e76-a4ab-5320d327954d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.095306 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.415061 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.446842 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvnkl\" (UniqueName: \"kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl\") pod \"3e2b20bd-f20a-485d-9020-b09ec0084c13\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.446911 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom\") pod \"3e2b20bd-f20a-485d-9020-b09ec0084c13\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.446983 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data\") pod \"3e2b20bd-f20a-485d-9020-b09ec0084c13\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.447903 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle\") pod \"3e2b20bd-f20a-485d-9020-b09ec0084c13\" (UID: \"3e2b20bd-f20a-485d-9020-b09ec0084c13\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.467916 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl" (OuterVolumeSpecName: "kube-api-access-bvnkl") pod "3e2b20bd-f20a-485d-9020-b09ec0084c13" (UID: "3e2b20bd-f20a-485d-9020-b09ec0084c13"). InnerVolumeSpecName "kube-api-access-bvnkl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.487660 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e2b20bd-f20a-485d-9020-b09ec0084c13" (UID: "3e2b20bd-f20a-485d-9020-b09ec0084c13"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.488412 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3e2b20bd-f20a-485d-9020-b09ec0084c13" (UID: "3e2b20bd-f20a-485d-9020-b09ec0084c13"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.512876 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data" (OuterVolumeSpecName: "config-data") pod "3e2b20bd-f20a-485d-9020-b09ec0084c13" (UID: "3e2b20bd-f20a-485d-9020-b09ec0084c13"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.555178 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.555202 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvnkl\" (UniqueName: \"kubernetes.io/projected/3e2b20bd-f20a-485d-9020-b09ec0084c13-kube-api-access-bvnkl\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.555214 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.555222 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2b20bd-f20a-485d-9020-b09ec0084c13-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.640499 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c659cc69-lgxnn" event={"ID":"3e2b20bd-f20a-485d-9020-b09ec0084c13","Type":"ContainerDied","Data":"3f32f9aa0621e2dd61ac9dc9db2852783633f9dd71526a70670828c631e93bc7"} Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.640559 4903 scope.go:117] "RemoveContainer" containerID="9f0fd9b55e977b12ee2cce922617380cdfbd0d17b51703bd3615f09a9c20262f" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.640508 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-8c659cc69-lgxnn" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.643830 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"306f5561-d47c-4e76-a4ab-5320d327954d","Type":"ContainerDied","Data":"9f8af7dedbeb59b532171464238d9c4cf2df6bd3dc35a84d87c8a0cff165dec6"} Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.643978 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.651788 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e9527a05-6356-4ee8-8e07-5557453ad8c2","Type":"ContainerStarted","Data":"3012b982db555d9974679f9ed109204cb698f01bb6d779a82ebd107bc0df7105"} Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.685584 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.6855673380000002 podStartE2EDuration="3.685567338s" podCreationTimestamp="2025-11-26 22:43:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:32.672523279 +0000 UTC m=+1341.362758189" watchObservedRunningTime="2025-11-26 22:43:32.685567338 +0000 UTC m=+1341.375802248" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.701524 4903 scope.go:117] "RemoveContainer" containerID="f51c317ace15fdc8c4f6bc5041bb57e433115de30aa770ec064ccecbb29c7035" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.731758 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.735820 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.751370 4903 scope.go:117] "RemoveContainer" containerID="8505d04695a9d0c561bd090e0fc5fde3adca7845b5084012c0e05e5fd474d197" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.758039 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data\") pod \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.758149 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle\") pod \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.758249 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom\") pod \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.758350 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm94d\" (UniqueName: \"kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d\") pod 
\"1d40a43f-69f3-4468-acbc-f9730dde2cd3\" (UID: \"1d40a43f-69f3-4468-acbc-f9730dde2cd3\") " Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.766461 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.767080 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d" (OuterVolumeSpecName: "kube-api-access-vm94d") pod "1d40a43f-69f3-4468-acbc-f9730dde2cd3" (UID: "1d40a43f-69f3-4468-acbc-f9730dde2cd3"). InnerVolumeSpecName "kube-api-access-vm94d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.769849 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1d40a43f-69f3-4468-acbc-f9730dde2cd3" (UID: "1d40a43f-69f3-4468-acbc-f9730dde2cd3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.778512 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.790563 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-8c659cc69-lgxnn"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812102 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.812619 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812647 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.812667 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-log" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812673 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-log" Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.812700 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812707 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.812751 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-httpd" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812757 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-httpd" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812957 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812978 4903 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.812990 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.813000 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-httpd" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.813020 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" containerName="glance-log" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.813031 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.813216 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.813231 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" containerName="heat-cfnapi" Nov 26 22:43:32 crc kubenswrapper[4903]: E1126 22:43:32.813245 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.813251 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" containerName="heat-api" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.816085 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.819746 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.819958 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.824857 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.826002 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d40a43f-69f3-4468-acbc-f9730dde2cd3" (UID: "1d40a43f-69f3-4468-acbc-f9730dde2cd3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.860832 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-logs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861077 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861241 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861371 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x2gj\" (UniqueName: \"kubernetes.io/projected/d500fa23-7825-4dde-95b4-dce1b93b24cb-kube-api-access-6x2gj\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861502 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861620 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861752 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.861844 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.862025 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 
26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.862103 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.862189 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm94d\" (UniqueName: \"kubernetes.io/projected/1d40a43f-69f3-4468-acbc-f9730dde2cd3-kube-api-access-vm94d\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.870305 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data" (OuterVolumeSpecName: "config-data") pod "1d40a43f-69f3-4468-acbc-f9730dde2cd3" (UID: "1d40a43f-69f3-4468-acbc-f9730dde2cd3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.966667 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.966803 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.966907 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-logs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.966935 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967023 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967085 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x2gj\" (UniqueName: \"kubernetes.io/projected/d500fa23-7825-4dde-95b4-dce1b93b24cb-kube-api-access-6x2gj\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967149 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967171 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967399 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d40a43f-69f3-4468-acbc-f9730dde2cd3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.967563 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-logs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.968045 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d500fa23-7825-4dde-95b4-dce1b93b24cb-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.970269 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.971318 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.973039 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.982392 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.982600 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d500fa23-7825-4dde-95b4-dce1b93b24cb-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:32 crc kubenswrapper[4903]: I1126 22:43:32.990733 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6x2gj\" (UniqueName: \"kubernetes.io/projected/d500fa23-7825-4dde-95b4-dce1b93b24cb-kube-api-access-6x2gj\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.005438 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"d500fa23-7825-4dde-95b4-dce1b93b24cb\") " pod="openstack/glance-default-internal-api-0" Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.136297 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.674478 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7777cd5989-t54l6" event={"ID":"1d40a43f-69f3-4468-acbc-f9730dde2cd3","Type":"ContainerDied","Data":"68e8ab7fc5ef91836d34a8d8dcdc575ab5ceec782da27ee464207e5489b64a49"} Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.675713 4903 scope.go:117] "RemoveContainer" containerID="04fd4c1fd269b8b643c938a5a207d73a61874434cb596a92e44b222b49f040ef" Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.676044 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7777cd5989-t54l6" Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.740455 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.757115 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:33 crc kubenswrapper[4903]: I1126 22:43:33.767597 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7777cd5989-t54l6"] Nov 26 22:43:34 crc kubenswrapper[4903]: I1126 22:43:34.047268 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d40a43f-69f3-4468-acbc-f9730dde2cd3" path="/var/lib/kubelet/pods/1d40a43f-69f3-4468-acbc-f9730dde2cd3/volumes" Nov 26 22:43:34 crc kubenswrapper[4903]: I1126 22:43:34.048474 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="306f5561-d47c-4e76-a4ab-5320d327954d" path="/var/lib/kubelet/pods/306f5561-d47c-4e76-a4ab-5320d327954d/volumes" Nov 26 22:43:34 crc kubenswrapper[4903]: I1126 22:43:34.049617 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e2b20bd-f20a-485d-9020-b09ec0084c13" path="/var/lib/kubelet/pods/3e2b20bd-f20a-485d-9020-b09ec0084c13/volumes" Nov 26 22:43:34 crc kubenswrapper[4903]: I1126 22:43:34.695116 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d500fa23-7825-4dde-95b4-dce1b93b24cb","Type":"ContainerStarted","Data":"c972eeecc26ef05dadd9efca9aa2587cbddaf203f60c43469b543056fb0fe125"} Nov 26 22:43:34 crc kubenswrapper[4903]: I1126 22:43:34.695449 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d500fa23-7825-4dde-95b4-dce1b93b24cb","Type":"ContainerStarted","Data":"b07560603891edc89c1da1fedf9435f05ca85580959867686b4cbcb80c0bac54"} Nov 26 22:43:35 crc kubenswrapper[4903]: I1126 22:43:35.712446 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"d500fa23-7825-4dde-95b4-dce1b93b24cb","Type":"ContainerStarted","Data":"2a323866456bd79f585a468a4b2dec83a6bef5849b8e27581e1562f17a24860c"} Nov 26 22:43:35 crc kubenswrapper[4903]: I1126 22:43:35.742448 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.742425591 podStartE2EDuration="3.742425591s" podCreationTimestamp="2025-11-26 22:43:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:35.735089614 +0000 UTC m=+1344.425324524" watchObservedRunningTime="2025-11-26 22:43:35.742425591 +0000 UTC m=+1344.432660501" Nov 26 22:43:36 crc kubenswrapper[4903]: I1126 22:43:36.722788 4903 generic.go:334] "Generic (PLEG): container finished" podID="6573f0cf-2bad-494e-8288-1c68c4326edb" containerID="277db87d3f7eed539d07fcdf860ff73245de3492f776f6db8aab98b2df719493" exitCode=0 Nov 26 22:43:36 crc kubenswrapper[4903]: I1126 22:43:36.722897 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" event={"ID":"6573f0cf-2bad-494e-8288-1c68c4326edb","Type":"ContainerDied","Data":"277db87d3f7eed539d07fcdf860ff73245de3492f776f6db8aab98b2df719493"} Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.270390 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.338551 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7znt\" (UniqueName: \"kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt\") pod \"6573f0cf-2bad-494e-8288-1c68c4326edb\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.338622 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts\") pod \"6573f0cf-2bad-494e-8288-1c68c4326edb\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.338785 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data\") pod \"6573f0cf-2bad-494e-8288-1c68c4326edb\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.338988 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle\") pod \"6573f0cf-2bad-494e-8288-1c68c4326edb\" (UID: \"6573f0cf-2bad-494e-8288-1c68c4326edb\") " Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.345822 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts" (OuterVolumeSpecName: "scripts") pod "6573f0cf-2bad-494e-8288-1c68c4326edb" (UID: "6573f0cf-2bad-494e-8288-1c68c4326edb"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.364944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt" (OuterVolumeSpecName: "kube-api-access-v7znt") pod "6573f0cf-2bad-494e-8288-1c68c4326edb" (UID: "6573f0cf-2bad-494e-8288-1c68c4326edb"). InnerVolumeSpecName "kube-api-access-v7znt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.380764 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data" (OuterVolumeSpecName: "config-data") pod "6573f0cf-2bad-494e-8288-1c68c4326edb" (UID: "6573f0cf-2bad-494e-8288-1c68c4326edb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.391895 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6573f0cf-2bad-494e-8288-1c68c4326edb" (UID: "6573f0cf-2bad-494e-8288-1c68c4326edb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.441463 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.441497 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.441509 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7znt\" (UniqueName: \"kubernetes.io/projected/6573f0cf-2bad-494e-8288-1c68c4326edb-kube-api-access-v7znt\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.441518 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6573f0cf-2bad-494e-8288-1c68c4326edb-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.747362 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" event={"ID":"6573f0cf-2bad-494e-8288-1c68c4326edb","Type":"ContainerDied","Data":"239e38403d587306c19bc4055464046046c608f376353f2affec3d30e4040675"} Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.747411 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="239e38403d587306c19bc4055464046046c608f376353f2affec3d30e4040675" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.747479 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-sfl5w" Nov 26 22:43:38 crc kubenswrapper[4903]: I1126 22:43:38.779072 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6c48d846b-6vkx8" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.441786 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.441990 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-79cc4cbd9-r6z9s" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" containerName="heat-engine" containerID="cri-o://f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" gracePeriod=60 Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.471796 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 22:43:39 crc kubenswrapper[4903]: E1126 22:43:39.472305 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6573f0cf-2bad-494e-8288-1c68c4326edb" containerName="nova-cell0-conductor-db-sync" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.472321 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6573f0cf-2bad-494e-8288-1c68c4326edb" containerName="nova-cell0-conductor-db-sync" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.472567 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6573f0cf-2bad-494e-8288-1c68c4326edb" containerName="nova-cell0-conductor-db-sync" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.473399 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.476898 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kl6hf" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.479660 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.499367 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.573262 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.573376 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgbrk\" (UniqueName: \"kubernetes.io/projected/47dfaf47-3f8b-4355-8c56-a0955f49d95f-kube-api-access-cgbrk\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.573513 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.675862 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.676208 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgbrk\" (UniqueName: \"kubernetes.io/projected/47dfaf47-3f8b-4355-8c56-a0955f49d95f-kube-api-access-cgbrk\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.676313 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.683236 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.683914 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47dfaf47-3f8b-4355-8c56-a0955f49d95f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.691678 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgbrk\" (UniqueName: \"kubernetes.io/projected/47dfaf47-3f8b-4355-8c56-a0955f49d95f-kube-api-access-cgbrk\") pod \"nova-cell0-conductor-0\" (UID: \"47dfaf47-3f8b-4355-8c56-a0955f49d95f\") " pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:39 crc kubenswrapper[4903]: I1126 22:43:39.804968 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.024323 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.024566 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.072475 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.093067 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.262019 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 22:43:40 crc kubenswrapper[4903]: E1126 22:43:40.606075 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 22:43:40 crc kubenswrapper[4903]: E1126 22:43:40.610768 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 22:43:40 crc kubenswrapper[4903]: E1126 22:43:40.612569 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 22:43:40 crc kubenswrapper[4903]: E1126 22:43:40.612634 4903 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-79cc4cbd9-r6z9s" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" containerName="heat-engine" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.765958 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47dfaf47-3f8b-4355-8c56-a0955f49d95f","Type":"ContainerStarted","Data":"6148942e8fa845470f1861041c6406ebcd8b27b816cd7093d2681cae0a9c34ea"} Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.766026 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47dfaf47-3f8b-4355-8c56-a0955f49d95f","Type":"ContainerStarted","Data":"6ce02ed6f5e459cd2a1457edc4db3b06ccf8d41d981e32286857c20aa4c3c9a0"} Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.766181 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.766955 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 22:43:40 crc kubenswrapper[4903]: I1126 22:43:40.781522 4903 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.781503572 podStartE2EDuration="1.781503572s" podCreationTimestamp="2025-11-26 22:43:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:40.779319094 +0000 UTC m=+1349.469554004" watchObservedRunningTime="2025-11-26 22:43:40.781503572 +0000 UTC m=+1349.471738482" Nov 26 22:43:41 crc kubenswrapper[4903]: I1126 22:43:41.624406 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="3f11fc1d-817f-4c2a-a777-a4ac0f568b07" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.201:3000/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:43:41 crc kubenswrapper[4903]: I1126 22:43:41.774369 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:42 crc kubenswrapper[4903]: I1126 22:43:42.784659 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:43:42 crc kubenswrapper[4903]: I1126 22:43:42.784915 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.136879 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.136930 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.186432 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.189452 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.511004 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.592655 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.593156 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.807270 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:43 crc kubenswrapper[4903]: I1126 22:43:43.814051 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:45 crc kubenswrapper[4903]: I1126 22:43:45.817346 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:43:45 crc kubenswrapper[4903]: I1126 22:43:45.818399 4903 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.053726 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 22:43:46 crc 
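This stretch is mostly prober traffic: startup probes flipping from unhealthy to started, readiness transitions, two ceilometer-0 readiness failures with different outputs, and "Failed to trigger a manual run" notices. A hypothetical tally script for skimming a log this dense; the regexes are written against the entry shapes above and are an editorial aid, not an official kubelet tool:

```python
import collections
import re
import sys

# Entry shapes assumed from the prober lines in this log.
PROBE_RE = re.compile(
    r'"SyncLoop \(probe\)" probe="(\w+)" status="([^"]*)" pod="([^"]+)"')
FAIL_RE = re.compile(r'"Probe (failed|errored)".*? pod="([^"]+)"')

def summarize(path: str) -> None:
    counts = collections.Counter()
    for line in open(path, encoding="utf-8", errors="replace"):
        if m := PROBE_RE.search(line):
            probe, status, pod = m.groups()
            counts[(pod, probe, status or "<empty>")] += 1
        elif m := FAIL_RE.search(line):
            counts[(m.group(2), "probe", m.group(1))] += 1
    for (pod, probe, status), n in sorted(counts.items()):
        print(f"{n:3d}  {pod:45s} {probe:10s} {status}")

if __name__ == "__main__":
    summarize(sys.argv[1])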
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.123399 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.827529 4903 generic.go:334] "Generic (PLEG): container finished" podID="b2447923-029d-4799-9adf-bd5fd9d44338" containerID="f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" exitCode=0
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.827600 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79cc4cbd9-r6z9s" event={"ID":"b2447923-029d-4799-9adf-bd5fd9d44338","Type":"ContainerDied","Data":"f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a"}
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.827820 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79cc4cbd9-r6z9s" event={"ID":"b2447923-029d-4799-9adf-bd5fd9d44338","Type":"ContainerDied","Data":"1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d"}
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.827833 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fcf1024577a1379f27af942ce3d8193347965532a85628e5ceb324e52dc492d"
Nov 26 22:43:46 crc kubenswrapper[4903]: I1126 22:43:46.901224 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-79cc4cbd9-r6z9s"
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.050619 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data\") pod \"b2447923-029d-4799-9adf-bd5fd9d44338\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") "
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.050964 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom\") pod \"b2447923-029d-4799-9adf-bd5fd9d44338\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") "
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.051063 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle\") pod \"b2447923-029d-4799-9adf-bd5fd9d44338\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") "
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.051094 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7lpl\" (UniqueName: \"kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl\") pod \"b2447923-029d-4799-9adf-bd5fd9d44338\" (UID: \"b2447923-029d-4799-9adf-bd5fd9d44338\") "
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.058890 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl" (OuterVolumeSpecName: "kube-api-access-k7lpl") pod "b2447923-029d-4799-9adf-bd5fd9d44338" (UID: "b2447923-029d-4799-9adf-bd5fd9d44338"). InnerVolumeSpecName "kube-api-access-k7lpl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.058943 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b2447923-029d-4799-9adf-bd5fd9d44338" (UID: "b2447923-029d-4799-9adf-bd5fd9d44338"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.084480 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2447923-029d-4799-9adf-bd5fd9d44338" (UID: "b2447923-029d-4799-9adf-bd5fd9d44338"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.118569 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data" (OuterVolumeSpecName: "config-data") pod "b2447923-029d-4799-9adf-bd5fd9d44338" (UID: "b2447923-029d-4799-9adf-bd5fd9d44338"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.154219 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.154250 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7lpl\" (UniqueName: \"kubernetes.io/projected/b2447923-029d-4799-9adf-bd5fd9d44338-kube-api-access-k7lpl\") on node \"crc\" DevicePath \"\""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.154262 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.154270 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2447923-029d-4799-9adf-bd5fd9d44338-config-data-custom\") on node \"crc\" DevicePath \"\""
Need to start a new one" pod="openstack/heat-engine-79cc4cbd9-r6z9s" Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.912916 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:47 crc kubenswrapper[4903]: I1126 22:43:47.927773 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-79cc4cbd9-r6z9s"] Nov 26 22:43:48 crc kubenswrapper[4903]: I1126 22:43:48.044520 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" path="/var/lib/kubelet/pods/b2447923-029d-4799-9adf-bd5fd9d44338/volumes" Nov 26 22:43:49 crc kubenswrapper[4903]: I1126 22:43:49.870332 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.337580 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-v8tkk"] Nov 26 22:43:50 crc kubenswrapper[4903]: E1126 22:43:50.338028 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" containerName="heat-engine" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.338048 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" containerName="heat-engine" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.338333 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2447923-029d-4799-9adf-bd5fd9d44338" containerName="heat-engine" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.339338 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.342573 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.343106 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.360396 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8tkk"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.438646 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.439050 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9slv4\" (UniqueName: \"kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.439244 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 
22:43:50.439409 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.526341 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.528388 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.531100 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.541029 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.541154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.541233 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9slv4\" (UniqueName: \"kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.541286 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.549374 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.566405 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.570257 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:43:50 crc 
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.590913 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.601276 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9slv4\" (UniqueName: \"kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4\") pod \"nova-cell0-cell-mapping-v8tkk\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " pod="openstack/nova-cell0-cell-mapping-v8tkk"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.613191 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-55ll5"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.614824 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.652927 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.653116 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.653179 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.653301 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.666496 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8tkk"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.678525 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-55ll5"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.759794 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.759872 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.759903 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.759955 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.760037 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.760087 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4rw6\" (UniqueName: \"kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.761122 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.767012 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.777859 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.797630 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb\") pod \"nova-metadata-0\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.843901 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.846045 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.848327 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.851128 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.867399 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.867473 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4rw6\" (UniqueName: \"kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.868471 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.879751 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.882159 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.884303 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.891555 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-180d-account-create-update-8bmhg"]
Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.893820 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4rw6\" (UniqueName: \"kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6\") pod \"aodh-db-create-55ll5\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " pod="openstack/aodh-db-create-55ll5"
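From 22:43:50 onward several pods are added nearly at once (nova-metadata-0, aodh-db-create-55ll5, nova-api-0, nova-scheduler-0, aodh-180d-account-create-update-8bmhg) and their volume operations interleave, which makes it easy to lose track of whether every attach-verified volume eventually reached SetUp. A hypothetical cross-check that pairs VerifyControllerAttachedVolume entries with MountVolume.SetUp successes by (pod UID, volume name); on a truncated excerpt like this one, volumes whose SetUp falls after the cut will naturally show up as pending:

```python
import re
import sys

# Entry shapes assumed from this log (escaped quotes are optional so the
# regexes also match unescaped variants). Illustrative aid only.
STARTED = re.compile(
    r'VerifyControllerAttachedVolume started for volume \\?"([^"\\]+)\\?"'
    r'.*?\(UID: \\?"([0-9a-f-]+)\\?"')
SETUP = re.compile(
    r'MountVolume\.SetUp succeeded for volume \\?"([^"\\]+)\\?"'
    r'.*?\(UID: \\?"([0-9a-f-]+)\\?"')

def pending(path: str):
    started, done = set(), set()
    for line in open(path, encoding="utf-8", errors="replace"):
        if m := STARTED.search(line):
            started.add((m.group(2), m.group(1)))
        elif m := SETUP.search(line):
            done.add((m.group(2), m.group(1)))
    return sorted(started - done)

if __name__ == "__main__":
    for uid, volume in pending(sys.argv[1]):
        print(f"volume {volume!r} of pod {uid} never reached SetUp")
```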
Need to start a new one" pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.905751 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.906086 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.914852 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.918167 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.926214 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.935490 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.946725 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.948042 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.953634 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.957554 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.968843 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-180d-account-create-update-8bmhg"] Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.976932 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.976986 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db7jz\" (UniqueName: \"kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981033 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981172 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981311 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz4nf\" 
(UniqueName: \"kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981375 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981451 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981491 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxpv6\" (UniqueName: \"kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:50 crc kubenswrapper[4903]: I1126 22:43:50.981512 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.030237 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-55ll5" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086005 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086064 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086093 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db7jz\" (UniqueName: \"kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086118 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086151 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086173 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086194 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086225 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086242 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qtv5\" (UniqueName: \"kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: 
I1126 22:43:51.086262 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086287 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086363 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz4nf\" (UniqueName: \"kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086390 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086432 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086453 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxpv6\" (UniqueName: \"kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086470 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086488 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzj9z\" (UniqueName: \"kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.086976 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.090647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.095606 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.104980 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.105501 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.108622 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz4nf\" (UniqueName: \"kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf\") pod \"aodh-180d-account-create-update-8bmhg\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.113017 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db7jz\" (UniqueName: \"kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz\") pod \"nova-api-0\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.113371 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.114405 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxpv6\" (UniqueName: \"kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6\") pod \"nova-scheduler-0\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202361 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzj9z\" (UniqueName: \"kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") 
" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202453 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202531 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202568 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202591 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202645 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202665 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qtv5\" (UniqueName: \"kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202698 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.202803 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.203938 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 
22:43:51.204937 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.205880 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.210084 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.212553 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.215516 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.218895 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzj9z\" (UniqueName: \"kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z\") pod \"dnsmasq-dns-9b86998b5-7w8wv\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.219256 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.227244 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qtv5\" (UniqueName: \"kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5\") pod \"nova-cell1-novncproxy-0\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.230770 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.244735 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.265800 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.281569 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.294301 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.354087 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8tkk"] Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.656756 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-55ll5"] Nov 26 22:43:51 crc kubenswrapper[4903]: W1126 22:43:51.660099 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda85279b_be32_48c7_8e3d_1e216c4b9023.slice/crio-f481d68fa6b9468c474f856d47076737e33db27b5026a18e9ad0f6a2db016d49 WatchSource:0}: Error finding container f481d68fa6b9468c474f856d47076737e33db27b5026a18e9ad0f6a2db016d49: Status 404 returned error can't find the container with id f481d68fa6b9468c474f856d47076737e33db27b5026a18e9ad0f6a2db016d49 Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.676499 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.856240 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zs7fj"] Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.859780 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.862407 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.862681 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.879561 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zs7fj"] Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.918046 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2dcq\" (UniqueName: \"kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.918126 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.918226 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " 
pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.918289 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.918446 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.929476 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-55ll5" event={"ID":"ccb9a538-950d-4c50-9ee3-380703481e5e","Type":"ContainerStarted","Data":"f5d287df59b423c788fa8a5d07a71a63430761f981718de22eeebccebe69d236"} Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.933869 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8tkk" event={"ID":"2355bf0c-e104-4dcd-888a-e164fd5d89be","Type":"ContainerStarted","Data":"cada2af09253f080bd319ef88c08e5a8d6beb4b9ba4dce77eb48dd3d8b95c18e"} Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.933910 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8tkk" event={"ID":"2355bf0c-e104-4dcd-888a-e164fd5d89be","Type":"ContainerStarted","Data":"d4f2d714bbc847deda8cd00c739884308fdea0b4306cc25d71ee4918e6ae1fb9"} Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.936924 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerStarted","Data":"f481d68fa6b9468c474f856d47076737e33db27b5026a18e9ad0f6a2db016d49"} Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.951001 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-v8tkk" podStartSLOduration=1.950985148 podStartE2EDuration="1.950985148s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:51.949460807 +0000 UTC m=+1360.639695717" watchObservedRunningTime="2025-11-26 22:43:51.950985148 +0000 UTC m=+1360.641220058" Nov 26 22:43:51 crc kubenswrapper[4903]: I1126 22:43:51.951963 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.020282 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.020372 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.020458 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2dcq\" 
(UniqueName: \"kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.020510 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.026485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.026500 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.031206 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.073451 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2dcq\" (UniqueName: \"kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq\") pod \"nova-cell1-conductor-db-sync-zs7fj\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.134069 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-180d-account-create-update-8bmhg"] Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.193072 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.366875 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 22:43:52 crc kubenswrapper[4903]: W1126 22:43:52.381968 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96696972_c8ae_4828_b9d4_78825bd31e3f.slice/crio-afd241d71703b7175c29b6dee20b74998db70415a0dd4913afed914d4775b14e WatchSource:0}: Error finding container afd241d71703b7175c29b6dee20b74998db70415a0dd4913afed914d4775b14e: Status 404 returned error can't find the container with id afd241d71703b7175c29b6dee20b74998db70415a0dd4913afed914d4775b14e Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.385125 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.764281 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zs7fj"] Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.956756 4903 generic.go:334] "Generic (PLEG): container finished" podID="ccb9a538-950d-4c50-9ee3-380703481e5e" containerID="33e9d9715f9763d48f40aa337a05a613716c9593eadba58be245347bddd936bb" exitCode=0 Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.956840 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-55ll5" event={"ID":"ccb9a538-950d-4c50-9ee3-380703481e5e","Type":"ContainerDied","Data":"33e9d9715f9763d48f40aa337a05a613716c9593eadba58be245347bddd936bb"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.960230 4903 generic.go:334] "Generic (PLEG): container finished" podID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerID="007987fee0d2a4a81f72f22648f253a11a86304f7bbdd1ab772216c4a3eb6b9d" exitCode=0 Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.960274 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" event={"ID":"96696972-c8ae-4828-b9d4-78825bd31e3f","Type":"ContainerDied","Data":"007987fee0d2a4a81f72f22648f253a11a86304f7bbdd1ab772216c4a3eb6b9d"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.960292 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" event={"ID":"96696972-c8ae-4828-b9d4-78825bd31e3f","Type":"ContainerStarted","Data":"afd241d71703b7175c29b6dee20b74998db70415a0dd4913afed914d4775b14e"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.967857 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" event={"ID":"be8f91a6-a9bc-4273-ad51-664e6e64ebb0","Type":"ContainerStarted","Data":"1d8592296137cc8c2b317e37922cf90d7cd771e67baa30a33342898bd93dbf00"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.979657 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"df060096-cf5d-46ee-a2c8-8ba582a39e0f","Type":"ContainerStarted","Data":"38d8c06c67278de08df41d86346f1577a25e708d273054bbea437a4294cd2ad7"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.982491 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8c1fed03-8447-4f90-8dc0-013a732cb664","Type":"ContainerStarted","Data":"adf6f07ab2907295004d162e0efa0878f3d12dbb816a4d1ebeb8d32ff374a04e"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.988408 4903 
generic.go:334] "Generic (PLEG): container finished" podID="444870af-f53d-4457-9650-f4de59dc6c14" containerID="d9dcae88ab8041f2d589cbaa62e0b1a19bebe572a35d5907e3539cb2858a1808" exitCode=0 Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.988480 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-180d-account-create-update-8bmhg" event={"ID":"444870af-f53d-4457-9650-f4de59dc6c14","Type":"ContainerDied","Data":"d9dcae88ab8041f2d589cbaa62e0b1a19bebe572a35d5907e3539cb2858a1808"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.988501 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-180d-account-create-update-8bmhg" event={"ID":"444870af-f53d-4457-9650-f4de59dc6c14","Type":"ContainerStarted","Data":"334a7e51f7d90572be3e14612bb3fbb8e673f28bdcde7daf3a56e3d27bb8ad9d"} Nov 26 22:43:52 crc kubenswrapper[4903]: I1126 22:43:52.998994 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerStarted","Data":"92526742a0150f1e1b0d5dd2dbcb1d4da966000173495b4f1396e1a1097796b0"} Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.022971 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" event={"ID":"96696972-c8ae-4828-b9d4-78825bd31e3f","Type":"ContainerStarted","Data":"afeec996aa8e6ffd5b6d9105ee3f55b83353f9e96e6f138bc66bbb84aaf36703"} Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.024784 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.070309 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" podStartSLOduration=4.070287372 podStartE2EDuration="4.070287372s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:54.053641846 +0000 UTC m=+1362.743876776" watchObservedRunningTime="2025-11-26 22:43:54.070287372 +0000 UTC m=+1362.760522282" Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.123510 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" podStartSLOduration=3.123488669 podStartE2EDuration="3.123488669s" podCreationTimestamp="2025-11-26 22:43:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:43:54.072571654 +0000 UTC m=+1362.762806574" watchObservedRunningTime="2025-11-26 22:43:54.123488669 +0000 UTC m=+1362.813723579" Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.135595 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" event={"ID":"be8f91a6-a9bc-4273-ad51-664e6e64ebb0","Type":"ContainerStarted","Data":"69697950149a64697ed4335dcd25e520b542879081887e309883001df4f79219"} Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.143892 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:54 crc kubenswrapper[4903]: I1126 22:43:54.157505 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.068655 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/aodh-180d-account-create-update-8bmhg" event={"ID":"444870af-f53d-4457-9650-f4de59dc6c14","Type":"ContainerDied","Data":"334a7e51f7d90572be3e14612bb3fbb8e673f28bdcde7daf3a56e3d27bb8ad9d"} Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.068992 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="334a7e51f7d90572be3e14612bb3fbb8e673f28bdcde7daf3a56e3d27bb8ad9d" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.070460 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-55ll5" event={"ID":"ccb9a538-950d-4c50-9ee3-380703481e5e","Type":"ContainerDied","Data":"f5d287df59b423c788fa8a5d07a71a63430761f981718de22eeebccebe69d236"} Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.070488 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5d287df59b423c788fa8a5d07a71a63430761f981718de22eeebccebe69d236" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.102871 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-55ll5" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.108781 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.227368 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4rw6\" (UniqueName: \"kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6\") pod \"ccb9a538-950d-4c50-9ee3-380703481e5e\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.227438 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz4nf\" (UniqueName: \"kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf\") pod \"444870af-f53d-4457-9650-f4de59dc6c14\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.227605 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts\") pod \"ccb9a538-950d-4c50-9ee3-380703481e5e\" (UID: \"ccb9a538-950d-4c50-9ee3-380703481e5e\") " Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.227670 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts\") pod \"444870af-f53d-4457-9650-f4de59dc6c14\" (UID: \"444870af-f53d-4457-9650-f4de59dc6c14\") " Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.228268 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ccb9a538-950d-4c50-9ee3-380703481e5e" (UID: "ccb9a538-950d-4c50-9ee3-380703481e5e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.229054 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "444870af-f53d-4457-9650-f4de59dc6c14" (UID: "444870af-f53d-4457-9650-f4de59dc6c14"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.233261 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6" (OuterVolumeSpecName: "kube-api-access-z4rw6") pod "ccb9a538-950d-4c50-9ee3-380703481e5e" (UID: "ccb9a538-950d-4c50-9ee3-380703481e5e"). InnerVolumeSpecName "kube-api-access-z4rw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.237473 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf" (OuterVolumeSpecName: "kube-api-access-gz4nf") pod "444870af-f53d-4457-9650-f4de59dc6c14" (UID: "444870af-f53d-4457-9650-f4de59dc6c14"). InnerVolumeSpecName "kube-api-access-gz4nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.330537 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccb9a538-950d-4c50-9ee3-380703481e5e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.330582 4903 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/444870af-f53d-4457-9650-f4de59dc6c14-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.330600 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4rw6\" (UniqueName: \"kubernetes.io/projected/ccb9a538-950d-4c50-9ee3-380703481e5e-kube-api-access-z4rw6\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:55 crc kubenswrapper[4903]: I1126 22:43:55.330615 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz4nf\" (UniqueName: \"kubernetes.io/projected/444870af-f53d-4457-9650-f4de59dc6c14-kube-api-access-gz4nf\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.091092 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8c1fed03-8447-4f90-8dc0-013a732cb664","Type":"ContainerStarted","Data":"f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac"} Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.097177 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerStarted","Data":"9fc4d9f5c0b64cbe7a4dbfaa84620c3746a34ed22b5e74c5c0cdae2197a0dd00"} Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.097224 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerStarted","Data":"dbdf7a938535a4f190cc734bf159cd24a096fb0decb75b167b58ad5864ed9e39"} Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.103873 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerStarted","Data":"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6"} Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.111846 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"df060096-cf5d-46ee-a2c8-8ba582a39e0f","Type":"ContainerStarted","Data":"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"} Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.111888 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-180d-account-create-update-8bmhg" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.111863 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-55ll5" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.112318 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae" gracePeriod=30 Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.112598 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.512425816 podStartE2EDuration="6.112578622s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="2025-11-26 22:43:51.924735163 +0000 UTC m=+1360.614970073" lastFinishedPulling="2025-11-26 22:43:55.524887969 +0000 UTC m=+1364.215122879" observedRunningTime="2025-11-26 22:43:56.103470397 +0000 UTC m=+1364.793705307" watchObservedRunningTime="2025-11-26 22:43:56.112578622 +0000 UTC m=+1364.802813532" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.157216 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.549478562 podStartE2EDuration="6.157177639s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="2025-11-26 22:43:51.924673402 +0000 UTC m=+1360.614908312" lastFinishedPulling="2025-11-26 22:43:55.532372479 +0000 UTC m=+1364.222607389" observedRunningTime="2025-11-26 22:43:56.133840362 +0000 UTC m=+1364.824075292" watchObservedRunningTime="2025-11-26 22:43:56.157177639 +0000 UTC m=+1364.847412569" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.189161 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.941560548 podStartE2EDuration="6.189146036s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="2025-11-26 22:43:52.40776782 +0000 UTC m=+1361.098002720" lastFinishedPulling="2025-11-26 22:43:55.655353298 +0000 UTC m=+1364.345588208" observedRunningTime="2025-11-26 22:43:56.167021913 +0000 UTC m=+1364.857256823" watchObservedRunningTime="2025-11-26 22:43:56.189146036 +0000 UTC m=+1364.879380946" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.245339 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 22:43:56 crc kubenswrapper[4903]: I1126 22:43:56.295598 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.132030 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerStarted","Data":"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e"} Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.132506 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-log" containerID="cri-o://1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" gracePeriod=30 Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.132566 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-metadata" containerID="cri-o://a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" gracePeriod=30 Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.200541 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.265109186 podStartE2EDuration="7.200515304s" podCreationTimestamp="2025-11-26 22:43:50 +0000 UTC" firstStartedPulling="2025-11-26 22:43:51.713792395 +0000 UTC m=+1360.404027295" lastFinishedPulling="2025-11-26 22:43:55.649198503 +0000 UTC m=+1364.339433413" observedRunningTime="2025-11-26 22:43:57.179195352 +0000 UTC m=+1365.869430342" watchObservedRunningTime="2025-11-26 22:43:57.200515304 +0000 UTC m=+1365.890750224" Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.937143 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.995634 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb\") pod \"da85279b-be32-48c7-8e3d-1e216c4b9023\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.995687 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data\") pod \"da85279b-be32-48c7-8e3d-1e216c4b9023\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.996700 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle\") pod \"da85279b-be32-48c7-8e3d-1e216c4b9023\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.996777 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs\") pod \"da85279b-be32-48c7-8e3d-1e216c4b9023\" (UID: \"da85279b-be32-48c7-8e3d-1e216c4b9023\") " Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.997119 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs" (OuterVolumeSpecName: "logs") pod "da85279b-be32-48c7-8e3d-1e216c4b9023" (UID: "da85279b-be32-48c7-8e3d-1e216c4b9023"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:57 crc kubenswrapper[4903]: I1126 22:43:57.997899 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da85279b-be32-48c7-8e3d-1e216c4b9023-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.003372 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb" (OuterVolumeSpecName: "kube-api-access-qfnbb") pod "da85279b-be32-48c7-8e3d-1e216c4b9023" (UID: "da85279b-be32-48c7-8e3d-1e216c4b9023"). InnerVolumeSpecName "kube-api-access-qfnbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.027719 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data" (OuterVolumeSpecName: "config-data") pod "da85279b-be32-48c7-8e3d-1e216c4b9023" (UID: "da85279b-be32-48c7-8e3d-1e216c4b9023"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.036422 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da85279b-be32-48c7-8e3d-1e216c4b9023" (UID: "da85279b-be32-48c7-8e3d-1e216c4b9023"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.056819 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.100116 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.100150 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/da85279b-be32-48c7-8e3d-1e216c4b9023-kube-api-access-qfnbb\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.100162 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da85279b-be32-48c7-8e3d-1e216c4b9023-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.150423 4903 generic.go:334] "Generic (PLEG): container finished" podID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerID="2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774" exitCode=137 Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.150513 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.150516 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerDied","Data":"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774"} Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.150586 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2","Type":"ContainerDied","Data":"4c3b4bef97fea08e779606dffcedfc739621d3d498d504f303519689fb0ba9ad"} Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.150614 4903 scope.go:117] "RemoveContainer" containerID="2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155025 4903 generic.go:334] "Generic (PLEG): container finished" podID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerID="a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" exitCode=0 Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155048 4903 generic.go:334] "Generic (PLEG): container finished" podID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerID="1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" exitCode=143 Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155067 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155104 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerDied","Data":"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e"} Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155126 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerDied","Data":"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6"} Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.155136 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"da85279b-be32-48c7-8e3d-1e216c4b9023","Type":"ContainerDied","Data":"f481d68fa6b9468c474f856d47076737e33db27b5026a18e9ad0f6a2db016d49"} Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201052 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201098 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201191 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201310 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201406 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx728\" (UniqueName: \"kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201455 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.201490 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd\") pod \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\" (UID: \"1e2d4f89-a0e3-43aa-8af6-c07146ef81b2\") " Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.202721 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.202878 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.210349 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts" (OuterVolumeSpecName: "scripts") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.210708 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728" (OuterVolumeSpecName: "kube-api-access-mx728") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "kube-api-access-mx728". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.216482 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.239145 4903 scope.go:117] "RemoveContainer" containerID="3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.251397 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.258004 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269220 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269733 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-notification-agent" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269751 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-notification-agent" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269769 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="sg-core" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269776 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="sg-core" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269793 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-log" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269799 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-log" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269821 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="proxy-httpd" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269827 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="proxy-httpd" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269838 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb9a538-950d-4c50-9ee3-380703481e5e" containerName="mariadb-database-create" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269845 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb9a538-950d-4c50-9ee3-380703481e5e" containerName="mariadb-database-create" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269866 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-central-agent" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269872 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-central-agent" 
Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269885 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444870af-f53d-4457-9650-f4de59dc6c14" containerName="mariadb-account-create-update" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.269892 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="444870af-f53d-4457-9650-f4de59dc6c14" containerName="mariadb-account-create-update" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.269905 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-metadata" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270061 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-metadata" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270264 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-log" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270280 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb9a538-950d-4c50-9ee3-380703481e5e" containerName="mariadb-database-create" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270296 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-central-agent" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270307 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="proxy-httpd" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270321 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="444870af-f53d-4457-9650-f4de59dc6c14" containerName="mariadb-account-create-update" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270332 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" containerName="nova-metadata-metadata" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270342 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="ceilometer-notification-agent" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.270351 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" containerName="sg-core" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.271664 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.273798 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.273987 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.283681 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.304373 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx728\" (UniqueName: \"kubernetes.io/projected/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-kube-api-access-mx728\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.304533 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.304596 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.304670 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.304750 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.346615 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.348204 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data" (OuterVolumeSpecName: "config-data") pod "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" (UID: "1e2d4f89-a0e3-43aa-8af6-c07146ef81b2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407035 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bftf5\" (UniqueName: \"kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407321 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407356 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407429 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407483 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407564 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.407576 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.452405 4903 scope.go:117] "RemoveContainer" containerID="c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.470533 4903 scope.go:117] "RemoveContainer" containerID="11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.495845 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.510845 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bftf5\" (UniqueName: \"kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.510938 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.511009 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.511188 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.511322 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.512042 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.516909 4903 scope.go:117] "RemoveContainer" containerID="2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.518481 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774\": container with ID starting with 2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774 not found: ID does not exist" containerID="2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.518523 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774"} err="failed to get container status \"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774\": rpc error: code = NotFound desc = could not find container \"2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774\": container with ID starting with 2fac9d0f4f7f392184f0411d3cd238827def4c59bea60467291bed0e0ff82774 not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.518565 4903 scope.go:117] "RemoveContainer" containerID="3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.519534 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.520705 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb\": container with ID starting with 3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb not found: ID does not 
exist" containerID="3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.520726 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb"} err="failed to get container status \"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb\": rpc error: code = NotFound desc = could not find container \"3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb\": container with ID starting with 3e0042e14929a74910f4e7abf177435340687ca74a9ec58bf05cf3b6bb69d6bb not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.520771 4903 scope.go:117] "RemoveContainer" containerID="c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.521439 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.521587 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.524513 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.576316 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326\": container with ID starting with c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326 not found: ID does not exist" containerID="c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.576371 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326"} err="failed to get container status \"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326\": rpc error: code = NotFound desc = could not find container \"c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326\": container with ID starting with c9e26a9931cdb702542659da8325bd11069c994d578b7ac0cfe21cb9a1983326 not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.576411 4903 scope.go:117] "RemoveContainer" containerID="11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.577767 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd\": container with ID starting with 11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd not found: ID does not 
exist" containerID="11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.577812 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd"} err="failed to get container status \"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd\": rpc error: code = NotFound desc = could not find container \"11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd\": container with ID starting with 11ffbf70d54669a5f19bf8aab5d66618631acbf582d5225aeab11c0363bd1dbd not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.577839 4903 scope.go:117] "RemoveContainer" containerID="a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.587929 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bftf5\" (UniqueName: \"kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5\") pod \"nova-metadata-0\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.600641 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.616739 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.616886 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.618957 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.619318 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.658371 4903 scope.go:117] "RemoveContainer" containerID="1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.683465 4903 scope.go:117] "RemoveContainer" containerID="a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.684076 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e\": container with ID starting with a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e not found: ID does not exist" containerID="a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.684108 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e"} err="failed to get container status \"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e\": rpc error: code = NotFound desc = could not find container \"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e\": container with ID starting with a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.684130 4903 scope.go:117] "RemoveContainer" 
containerID="1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" Nov 26 22:43:58 crc kubenswrapper[4903]: E1126 22:43:58.684504 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6\": container with ID starting with 1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6 not found: ID does not exist" containerID="1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.684523 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6"} err="failed to get container status \"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6\": rpc error: code = NotFound desc = could not find container \"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6\": container with ID starting with 1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6 not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.684535 4903 scope.go:117] "RemoveContainer" containerID="a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.685100 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e"} err="failed to get container status \"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e\": rpc error: code = NotFound desc = could not find container \"a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e\": container with ID starting with a207232ddd9df6acf86c8d0226c9c1f3dde10b19208a4c7c402f0f0682ed737e not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.685120 4903 scope.go:117] "RemoveContainer" containerID="1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.685524 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6"} err="failed to get container status \"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6\": rpc error: code = NotFound desc = could not find container \"1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6\": container with ID starting with 1ad27114d780839165f5fc5949f0d5780454875c421cd2531f2b3f27e89f77d6 not found: ID does not exist" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.719505 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.719822 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.719873 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8nfw\" (UniqueName: \"kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.719938 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.719994 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.720024 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.720141 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.750920 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.821676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.821719 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.821856 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.821900 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.821985 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.822004 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8nfw\" (UniqueName: \"kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.822036 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.823151 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.825410 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.826025 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " 
pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.828272 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.831441 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.833512 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.846616 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8nfw\" (UniqueName: \"kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw\") pod \"ceilometer-0\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " pod="openstack/ceilometer-0" Nov 26 22:43:58 crc kubenswrapper[4903]: I1126 22:43:58.953531 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:43:59 crc kubenswrapper[4903]: I1126 22:43:59.219875 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:43:59 crc kubenswrapper[4903]: W1126 22:43:59.267284 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbda57282_6e21_46c1_8e86_382942170138.slice/crio-8710596fae5363c258a006397490b166b7665756d3a1334e6fa2217373f7ffc2 WatchSource:0}: Error finding container 8710596fae5363c258a006397490b166b7665756d3a1334e6fa2217373f7ffc2: Status 404 returned error can't find the container with id 8710596fae5363c258a006397490b166b7665756d3a1334e6fa2217373f7ffc2 Nov 26 22:43:59 crc kubenswrapper[4903]: I1126 22:43:59.429919 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:43:59 crc kubenswrapper[4903]: W1126 22:43:59.436310 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfab088e1_6d09_4778_bc6c_1ee9ef695207.slice/crio-e3b8047a7f8db3545f48296cc9ac8069deaaaffce81df8072617baa4d77b4f6d WatchSource:0}: Error finding container e3b8047a7f8db3545f48296cc9ac8069deaaaffce81df8072617baa4d77b4f6d: Status 404 returned error can't find the container with id e3b8047a7f8db3545f48296cc9ac8069deaaaffce81df8072617baa4d77b4f6d Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.052286 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e2d4f89-a0e3-43aa-8af6-c07146ef81b2" path="/var/lib/kubelet/pods/1e2d4f89-a0e3-43aa-8af6-c07146ef81b2/volumes" Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.054069 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da85279b-be32-48c7-8e3d-1e216c4b9023" path="/var/lib/kubelet/pods/da85279b-be32-48c7-8e3d-1e216c4b9023/volumes" Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.190804 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerStarted","Data":"4636538c68e79519927e1bdd996a1c06be143b6c3a33711ec80082fb835e046c"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.191265 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerStarted","Data":"e3b8047a7f8db3545f48296cc9ac8069deaaaffce81df8072617baa4d77b4f6d"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.192881 4903 generic.go:334] "Generic (PLEG): container finished" podID="2355bf0c-e104-4dcd-888a-e164fd5d89be" containerID="cada2af09253f080bd319ef88c08e5a8d6beb4b9ba4dce77eb48dd3d8b95c18e" exitCode=0 Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.192941 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8tkk" event={"ID":"2355bf0c-e104-4dcd-888a-e164fd5d89be","Type":"ContainerDied","Data":"cada2af09253f080bd319ef88c08e5a8d6beb4b9ba4dce77eb48dd3d8b95c18e"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.194824 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerStarted","Data":"7aade9ab56990ede4839b92d786fbffc42b7844dec9abae99e563ffced08fad4"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.194863 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerStarted","Data":"e1f7742abdd33d1f5c9eb09b81120ecd990a20ab150588564d4046184d3cef8b"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.194872 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerStarted","Data":"8710596fae5363c258a006397490b166b7665756d3a1334e6fa2217373f7ffc2"} Nov 26 22:44:00 crc kubenswrapper[4903]: I1126 22:44:00.243277 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.243258208 podStartE2EDuration="2.243258208s" podCreationTimestamp="2025-11-26 22:43:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:00.24224225 +0000 UTC m=+1368.932477170" watchObservedRunningTime="2025-11-26 22:44:00.243258208 +0000 UTC m=+1368.933493118" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.099367 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-njqgv"] Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.100987 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.102884 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.103322 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pkbb4" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.103457 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.104364 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.112753 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-njqgv"] Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.184358 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.184412 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.184436 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k6kk\" (UniqueName: \"kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.184469 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.206378 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerStarted","Data":"e35851a05f8cf98c55df9dbf6d7af444e0a39f90b8a2688daacb5d9f48067cd6"} Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.208541 4903 generic.go:334] "Generic (PLEG): container finished" podID="be8f91a6-a9bc-4273-ad51-664e6e64ebb0" containerID="69697950149a64697ed4335dcd25e520b542879081887e309883001df4f79219" exitCode=0 Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.208597 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" event={"ID":"be8f91a6-a9bc-4273-ad51-664e6e64ebb0","Type":"ContainerDied","Data":"69697950149a64697ed4335dcd25e520b542879081887e309883001df4f79219"} Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.231616 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.231678 4903 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.245158 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.285644 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.286102 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.286154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.286177 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k6kk\" (UniqueName: \"kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.286215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.295806 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.296165 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.305546 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.306885 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.307674 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k6kk\" (UniqueName: \"kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk\") pod \"aodh-db-sync-njqgv\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 
22:44:01.415403 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.425757 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.425978 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="dnsmasq-dns" containerID="cri-o://a7ed7eca4d9ca7cc58709f9238343879b90c2ff5e7accb43d5a1675456d28b4f" gracePeriod=10 Nov 26 22:44:01 crc kubenswrapper[4903]: I1126 22:44:01.953430 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.013912 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data\") pod \"2355bf0c-e104-4dcd-888a-e164fd5d89be\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.014276 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9slv4\" (UniqueName: \"kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4\") pod \"2355bf0c-e104-4dcd-888a-e164fd5d89be\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.014443 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts\") pod \"2355bf0c-e104-4dcd-888a-e164fd5d89be\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.014635 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle\") pod \"2355bf0c-e104-4dcd-888a-e164fd5d89be\" (UID: \"2355bf0c-e104-4dcd-888a-e164fd5d89be\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.076085 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4" (OuterVolumeSpecName: "kube-api-access-9slv4") pod "2355bf0c-e104-4dcd-888a-e164fd5d89be" (UID: "2355bf0c-e104-4dcd-888a-e164fd5d89be"). InnerVolumeSpecName "kube-api-access-9slv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.076194 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts" (OuterVolumeSpecName: "scripts") pod "2355bf0c-e104-4dcd-888a-e164fd5d89be" (UID: "2355bf0c-e104-4dcd-888a-e164fd5d89be"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.134088 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data" (OuterVolumeSpecName: "config-data") pod "2355bf0c-e104-4dcd-888a-e164fd5d89be" (UID: "2355bf0c-e104-4dcd-888a-e164fd5d89be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.136593 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9slv4\" (UniqueName: \"kubernetes.io/projected/2355bf0c-e104-4dcd-888a-e164fd5d89be-kube-api-access-9slv4\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.136626 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.136659 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.163989 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2355bf0c-e104-4dcd-888a-e164fd5d89be" (UID: "2355bf0c-e104-4dcd-888a-e164fd5d89be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.228647 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-v8tkk" event={"ID":"2355bf0c-e104-4dcd-888a-e164fd5d89be","Type":"ContainerDied","Data":"d4f2d714bbc847deda8cd00c739884308fdea0b4306cc25d71ee4918e6ae1fb9"} Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.228690 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4f2d714bbc847deda8cd00c739884308fdea0b4306cc25d71ee4918e6ae1fb9" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.228782 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-v8tkk" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.233231 4903 generic.go:334] "Generic (PLEG): container finished" podID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerID="a7ed7eca4d9ca7cc58709f9238343879b90c2ff5e7accb43d5a1675456d28b4f" exitCode=0 Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.233397 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerDied","Data":"a7ed7eca4d9ca7cc58709f9238343879b90c2ff5e7accb43d5a1675456d28b4f"} Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.238036 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2355bf0c-e104-4dcd-888a-e164fd5d89be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.296251 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.316031 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.229:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.316118 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.229:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.441873 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-njqgv"] Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.558891 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.560402 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-log" containerID="cri-o://dbdf7a938535a4f190cc734bf159cd24a096fb0decb75b167b58ad5864ed9e39" gracePeriod=30 Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.560771 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-api" containerID="cri-o://9fc4d9f5c0b64cbe7a4dbfaa84620c3746a34ed22b5e74c5c0cdae2197a0dd00" gracePeriod=30 Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.595189 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.595385 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-log" containerID="cri-o://e1f7742abdd33d1f5c9eb09b81120ecd990a20ab150588564d4046184d3cef8b" gracePeriod=30 Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.595510 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-metadata" 
containerID="cri-o://7aade9ab56990ede4839b92d786fbffc42b7844dec9abae99e563ffced08fad4" gracePeriod=30 Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.624973 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760237 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760298 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt5rj\" (UniqueName: \"kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760355 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760390 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760407 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.760745 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb\") pod \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\" (UID: \"ae631441-ed7e-48cc-8d5a-6dd39122a07a\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.784396 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj" (OuterVolumeSpecName: "kube-api-access-gt5rj") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "kube-api-access-gt5rj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.864253 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt5rj\" (UniqueName: \"kubernetes.io/projected/ae631441-ed7e-48cc-8d5a-6dd39122a07a-kube-api-access-gt5rj\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.909598 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.910390 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.910410 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.917996 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config" (OuterVolumeSpecName: "config") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.934950 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.935189 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ae631441-ed7e-48cc-8d5a-6dd39122a07a" (UID: "ae631441-ed7e-48cc-8d5a-6dd39122a07a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.964980 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts\") pod \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965218 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2dcq\" (UniqueName: \"kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq\") pod \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965287 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle\") pod \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965316 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data\") pod \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\" (UID: \"be8f91a6-a9bc-4273-ad51-664e6e64ebb0\") " Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965857 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965869 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965878 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965887 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.965894 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae631441-ed7e-48cc-8d5a-6dd39122a07a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.971781 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts" (OuterVolumeSpecName: "scripts") pod "be8f91a6-a9bc-4273-ad51-664e6e64ebb0" (UID: "be8f91a6-a9bc-4273-ad51-664e6e64ebb0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:02 crc kubenswrapper[4903]: I1126 22:44:02.982671 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq" (OuterVolumeSpecName: "kube-api-access-g2dcq") pod "be8f91a6-a9bc-4273-ad51-664e6e64ebb0" (UID: "be8f91a6-a9bc-4273-ad51-664e6e64ebb0"). InnerVolumeSpecName "kube-api-access-g2dcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.020609 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be8f91a6-a9bc-4273-ad51-664e6e64ebb0" (UID: "be8f91a6-a9bc-4273-ad51-664e6e64ebb0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.028206 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data" (OuterVolumeSpecName: "config-data") pod "be8f91a6-a9bc-4273-ad51-664e6e64ebb0" (UID: "be8f91a6-a9bc-4273-ad51-664e6e64ebb0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.068817 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.068842 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.068853 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2dcq\" (UniqueName: \"kubernetes.io/projected/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-kube-api-access-g2dcq\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.068864 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be8f91a6-a9bc-4273-ad51-664e6e64ebb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.139042 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.339680 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 22:44:03 crc kubenswrapper[4903]: E1126 22:44:03.340642 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be8f91a6-a9bc-4273-ad51-664e6e64ebb0" containerName="nova-cell1-conductor-db-sync" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.340656 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="be8f91a6-a9bc-4273-ad51-664e6e64ebb0" containerName="nova-cell1-conductor-db-sync" Nov 26 22:44:03 crc kubenswrapper[4903]: E1126 22:44:03.340675 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2355bf0c-e104-4dcd-888a-e164fd5d89be" containerName="nova-manage" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.340682 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2355bf0c-e104-4dcd-888a-e164fd5d89be" 
containerName="nova-manage" Nov 26 22:44:03 crc kubenswrapper[4903]: E1126 22:44:03.340733 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="init" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.340740 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="init" Nov 26 22:44:03 crc kubenswrapper[4903]: E1126 22:44:03.340754 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="dnsmasq-dns" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.340760 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="dnsmasq-dns" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.341564 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="be8f91a6-a9bc-4273-ad51-664e6e64ebb0" containerName="nova-cell1-conductor-db-sync" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.341595 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" containerName="dnsmasq-dns" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.341617 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2355bf0c-e104-4dcd-888a-e164fd5d89be" containerName="nova-manage" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.342752 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.370648 4903 generic.go:334] "Generic (PLEG): container finished" podID="bda57282-6e21-46c1-8e86-382942170138" containerID="7aade9ab56990ede4839b92d786fbffc42b7844dec9abae99e563ffced08fad4" exitCode=0 Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.370675 4903 generic.go:334] "Generic (PLEG): container finished" podID="bda57282-6e21-46c1-8e86-382942170138" containerID="e1f7742abdd33d1f5c9eb09b81120ecd990a20ab150588564d4046184d3cef8b" exitCode=143 Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.370729 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerDied","Data":"7aade9ab56990ede4839b92d786fbffc42b7844dec9abae99e563ffced08fad4"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.370785 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerDied","Data":"e1f7742abdd33d1f5c9eb09b81120ecd990a20ab150588564d4046184d3cef8b"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.391360 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" event={"ID":"be8f91a6-a9bc-4273-ad51-664e6e64ebb0","Type":"ContainerDied","Data":"1d8592296137cc8c2b317e37922cf90d7cd771e67baa30a33342898bd93dbf00"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.391392 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d8592296137cc8c2b317e37922cf90d7cd771e67baa30a33342898bd93dbf00" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.391485 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zs7fj" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.397544 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" event={"ID":"ae631441-ed7e-48cc-8d5a-6dd39122a07a","Type":"ContainerDied","Data":"58e33f15f9e825696e9e5cef4b484828ae24da85cd08a4bbf9cb54f56b0bf36b"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.397596 4903 scope.go:117] "RemoveContainer" containerID="a7ed7eca4d9ca7cc58709f9238343879b90c2ff5e7accb43d5a1675456d28b4f" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.397755 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-8c58d" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.415290 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.415406 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjzvj\" (UniqueName: \"kubernetes.io/projected/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-kube-api-access-zjzvj\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.415698 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.418259 4903 generic.go:334] "Generic (PLEG): container finished" podID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerID="dbdf7a938535a4f190cc734bf159cd24a096fb0decb75b167b58ad5864ed9e39" exitCode=143 Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.418317 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerDied","Data":"dbdf7a938535a4f190cc734bf159cd24a096fb0decb75b167b58ad5864ed9e39"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.420427 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-njqgv" event={"ID":"45c66290-f98d-4c93-ac95-b63cd9e0777c","Type":"ContainerStarted","Data":"9f0683ad32227bd8797ffd143ab09f9ed45976b89c2d8fac835cccae3e9a311f"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.446022 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerStarted","Data":"5fe72152458ad2fc3fb198ee0ff3d0000dae3ae3763f3b2d5035bc605b501131"} Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.463227 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.474862 4903 scope.go:117] "RemoveContainer" containerID="ee15bc0b8c8ea891f2f46747cac633aac5d252954cbdee237ebe4f1fd7f3ccf2" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.496851 4903 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.515068 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-8c58d"] Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.521261 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.521331 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjzvj\" (UniqueName: \"kubernetes.io/projected/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-kube-api-access-zjzvj\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.521494 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.527327 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.541647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.546136 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjzvj\" (UniqueName: \"kubernetes.io/projected/7d4be6e3-d909-4e4f-b5a0-3c949c02421a-kube-api-access-zjzvj\") pod \"nova-cell1-conductor-0\" (UID: \"7d4be6e3-d909-4e4f-b5a0-3c949c02421a\") " pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.650858 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.711286 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.724555 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs\") pod \"bda57282-6e21-46c1-8e86-382942170138\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.724601 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data\") pod \"bda57282-6e21-46c1-8e86-382942170138\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.724625 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs\") pod \"bda57282-6e21-46c1-8e86-382942170138\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.724737 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle\") pod \"bda57282-6e21-46c1-8e86-382942170138\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.724792 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bftf5\" (UniqueName: \"kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5\") pod \"bda57282-6e21-46c1-8e86-382942170138\" (UID: \"bda57282-6e21-46c1-8e86-382942170138\") " Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.739437 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs" (OuterVolumeSpecName: "logs") pod "bda57282-6e21-46c1-8e86-382942170138" (UID: "bda57282-6e21-46c1-8e86-382942170138"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.770636 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data" (OuterVolumeSpecName: "config-data") pod "bda57282-6e21-46c1-8e86-382942170138" (UID: "bda57282-6e21-46c1-8e86-382942170138"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.780025 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5" (OuterVolumeSpecName: "kube-api-access-bftf5") pod "bda57282-6e21-46c1-8e86-382942170138" (UID: "bda57282-6e21-46c1-8e86-382942170138"). InnerVolumeSpecName "kube-api-access-bftf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.790772 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bda57282-6e21-46c1-8e86-382942170138" (UID: "bda57282-6e21-46c1-8e86-382942170138"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.829769 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bftf5\" (UniqueName: \"kubernetes.io/projected/bda57282-6e21-46c1-8e86-382942170138-kube-api-access-bftf5\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.829971 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda57282-6e21-46c1-8e86-382942170138-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.830040 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.830113 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.922933 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "bda57282-6e21-46c1-8e86-382942170138" (UID: "bda57282-6e21-46c1-8e86-382942170138"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:03 crc kubenswrapper[4903]: I1126 22:44:03.932375 4903 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda57282-6e21-46c1-8e86-382942170138-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.055878 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae631441-ed7e-48cc-8d5a-6dd39122a07a" path="/var/lib/kubelet/pods/ae631441-ed7e-48cc-8d5a-6dd39122a07a/volumes" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.294590 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.505551 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bda57282-6e21-46c1-8e86-382942170138","Type":"ContainerDied","Data":"8710596fae5363c258a006397490b166b7665756d3a1334e6fa2217373f7ffc2"} Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.505606 4903 scope.go:117] "RemoveContainer" containerID="7aade9ab56990ede4839b92d786fbffc42b7844dec9abae99e563ffced08fad4" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.505781 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.531933 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerStarted","Data":"283b2c8afa43d21ee775601d8bd97c7c93bc3a6db8a957b602792cb389e98a8f"} Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.532078 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.533216 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7d4be6e3-d909-4e4f-b5a0-3c949c02421a","Type":"ContainerStarted","Data":"239ae7bf451895e5ab4eba38c0e356c096aab843bc8cae30ac2ed5525b492d85"} Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.533417 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" containerName="nova-scheduler-scheduler" containerID="cri-o://f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" gracePeriod=30 Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.555927 4903 scope.go:117] "RemoveContainer" containerID="e1f7742abdd33d1f5c9eb09b81120ecd990a20ab150588564d4046184d3cef8b" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.580653 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.20159917 podStartE2EDuration="6.580633268s" podCreationTimestamp="2025-11-26 22:43:58 +0000 UTC" firstStartedPulling="2025-11-26 22:43:59.441198264 +0000 UTC m=+1368.131433174" lastFinishedPulling="2025-11-26 22:44:03.820232362 +0000 UTC m=+1372.510467272" observedRunningTime="2025-11-26 22:44:04.561411262 +0000 UTC m=+1373.251646172" watchObservedRunningTime="2025-11-26 22:44:04.580633268 +0000 UTC m=+1373.270868178" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.595518 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.606079 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.635004 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:04 crc kubenswrapper[4903]: E1126 22:44:04.635505 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-metadata" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.635523 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-metadata" Nov 26 22:44:04 crc kubenswrapper[4903]: E1126 22:44:04.635572 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-log" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.635579 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-log" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.635819 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-metadata" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.635841 4903 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="bda57282-6e21-46c1-8e86-382942170138" containerName="nova-metadata-log" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.637107 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.642061 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.642281 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.655573 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.761503 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.762062 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.762202 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.762282 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj7n8\" (UniqueName: \"kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.762367 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.866181 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.866259 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.866329 4903 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.866355 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj7n8\" (UniqueName: \"kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.866383 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.871255 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.873067 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.879550 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.887196 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.903598 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj7n8\" (UniqueName: \"kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8\") pod \"nova-metadata-0\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " pod="openstack/nova-metadata-0" Nov 26 22:44:04 crc kubenswrapper[4903]: I1126 22:44:04.969160 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:05 crc kubenswrapper[4903]: I1126 22:44:05.528216 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:05 crc kubenswrapper[4903]: W1126 22:44:05.535887 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc6693f3_abb4_4ac9_9535_ff92ff9a740e.slice/crio-17a545282f76d7fb261e86179768215072e0ce65d5bfc651c0d32d5ffe7770da WatchSource:0}: Error finding container 17a545282f76d7fb261e86179768215072e0ce65d5bfc651c0d32d5ffe7770da: Status 404 returned error can't find the container with id 17a545282f76d7fb261e86179768215072e0ce65d5bfc651c0d32d5ffe7770da Nov 26 22:44:05 crc kubenswrapper[4903]: I1126 22:44:05.550345 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7d4be6e3-d909-4e4f-b5a0-3c949c02421a","Type":"ContainerStarted","Data":"033db98f1e742cb12d0763b1af5c7e71535427f8451463ec35b7647580b0978c"} Nov 26 22:44:05 crc kubenswrapper[4903]: I1126 22:44:05.550392 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:05 crc kubenswrapper[4903]: I1126 22:44:05.574165 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.574147076 podStartE2EDuration="2.574147076s" podCreationTimestamp="2025-11-26 22:44:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:05.564753355 +0000 UTC m=+1374.254988255" watchObservedRunningTime="2025-11-26 22:44:05.574147076 +0000 UTC m=+1374.264381986" Nov 26 22:44:06 crc kubenswrapper[4903]: I1126 22:44:06.047567 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bda57282-6e21-46c1-8e86-382942170138" path="/var/lib/kubelet/pods/bda57282-6e21-46c1-8e86-382942170138/volumes" Nov 26 22:44:06 crc kubenswrapper[4903]: E1126 22:44:06.246991 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 22:44:06 crc kubenswrapper[4903]: E1126 22:44:06.250313 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 22:44:06 crc kubenswrapper[4903]: E1126 22:44:06.251983 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 22:44:06 crc kubenswrapper[4903]: E1126 22:44:06.252048 4903 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" 
podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" containerName="nova-scheduler-scheduler" Nov 26 22:44:06 crc kubenswrapper[4903]: I1126 22:44:06.577753 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerStarted","Data":"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575"} Nov 26 22:44:06 crc kubenswrapper[4903]: I1126 22:44:06.577818 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerStarted","Data":"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f"} Nov 26 22:44:06 crc kubenswrapper[4903]: I1126 22:44:06.577829 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerStarted","Data":"17a545282f76d7fb261e86179768215072e0ce65d5bfc651c0d32d5ffe7770da"} Nov 26 22:44:06 crc kubenswrapper[4903]: I1126 22:44:06.601425 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.60140642 podStartE2EDuration="2.60140642s" podCreationTimestamp="2025-11-26 22:44:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:06.595166133 +0000 UTC m=+1375.285401043" watchObservedRunningTime="2025-11-26 22:44:06.60140642 +0000 UTC m=+1375.291641330" Nov 26 22:44:07 crc kubenswrapper[4903]: I1126 22:44:07.591366 4903 generic.go:334] "Generic (PLEG): container finished" podID="8c1fed03-8447-4f90-8dc0-013a732cb664" containerID="f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" exitCode=0 Nov 26 22:44:07 crc kubenswrapper[4903]: I1126 22:44:07.591452 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8c1fed03-8447-4f90-8dc0-013a732cb664","Type":"ContainerDied","Data":"f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac"} Nov 26 22:44:09 crc kubenswrapper[4903]: I1126 22:44:09.628559 4903 generic.go:334] "Generic (PLEG): container finished" podID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerID="9fc4d9f5c0b64cbe7a4dbfaa84620c3746a34ed22b5e74c5c0cdae2197a0dd00" exitCode=0 Nov 26 22:44:09 crc kubenswrapper[4903]: I1126 22:44:09.629079 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerDied","Data":"9fc4d9f5c0b64cbe7a4dbfaa84620c3746a34ed22b5e74c5c0cdae2197a0dd00"} Nov 26 22:44:09 crc kubenswrapper[4903]: I1126 22:44:09.970643 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 22:44:09 crc kubenswrapper[4903]: I1126 22:44:09.971096 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.021224 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.030880 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.210644 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data\") pod \"5889d0d5-76b2-4fda-8b1f-65309d462a62\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.210718 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxpv6\" (UniqueName: \"kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6\") pod \"8c1fed03-8447-4f90-8dc0-013a732cb664\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.210838 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs\") pod \"5889d0d5-76b2-4fda-8b1f-65309d462a62\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.210982 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle\") pod \"8c1fed03-8447-4f90-8dc0-013a732cb664\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.211280 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-db7jz\" (UniqueName: \"kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz\") pod \"5889d0d5-76b2-4fda-8b1f-65309d462a62\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.211370 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data\") pod \"8c1fed03-8447-4f90-8dc0-013a732cb664\" (UID: \"8c1fed03-8447-4f90-8dc0-013a732cb664\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.211407 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle\") pod \"5889d0d5-76b2-4fda-8b1f-65309d462a62\" (UID: \"5889d0d5-76b2-4fda-8b1f-65309d462a62\") " Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.212521 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs" (OuterVolumeSpecName: "logs") pod "5889d0d5-76b2-4fda-8b1f-65309d462a62" (UID: "5889d0d5-76b2-4fda-8b1f-65309d462a62"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.221808 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz" (OuterVolumeSpecName: "kube-api-access-db7jz") pod "5889d0d5-76b2-4fda-8b1f-65309d462a62" (UID: "5889d0d5-76b2-4fda-8b1f-65309d462a62"). InnerVolumeSpecName "kube-api-access-db7jz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.223479 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6" (OuterVolumeSpecName: "kube-api-access-jxpv6") pod "8c1fed03-8447-4f90-8dc0-013a732cb664" (UID: "8c1fed03-8447-4f90-8dc0-013a732cb664"). InnerVolumeSpecName "kube-api-access-jxpv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.253816 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data" (OuterVolumeSpecName: "config-data") pod "8c1fed03-8447-4f90-8dc0-013a732cb664" (UID: "8c1fed03-8447-4f90-8dc0-013a732cb664"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.255052 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5889d0d5-76b2-4fda-8b1f-65309d462a62" (UID: "5889d0d5-76b2-4fda-8b1f-65309d462a62"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.255620 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data" (OuterVolumeSpecName: "config-data") pod "5889d0d5-76b2-4fda-8b1f-65309d462a62" (UID: "5889d0d5-76b2-4fda-8b1f-65309d462a62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.260662 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c1fed03-8447-4f90-8dc0-013a732cb664" (UID: "8c1fed03-8447-4f90-8dc0-013a732cb664"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322154 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-db7jz\" (UniqueName: \"kubernetes.io/projected/5889d0d5-76b2-4fda-8b1f-65309d462a62-kube-api-access-db7jz\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322200 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322213 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322224 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5889d0d5-76b2-4fda-8b1f-65309d462a62-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322232 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxpv6\" (UniqueName: \"kubernetes.io/projected/8c1fed03-8447-4f90-8dc0-013a732cb664-kube-api-access-jxpv6\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322241 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5889d0d5-76b2-4fda-8b1f-65309d462a62-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.322250 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c1fed03-8447-4f90-8dc0-013a732cb664-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.641511 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8c1fed03-8447-4f90-8dc0-013a732cb664","Type":"ContainerDied","Data":"adf6f07ab2907295004d162e0efa0878f3d12dbb816a4d1ebeb8d32ff374a04e"} Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.641569 4903 scope.go:117] "RemoveContainer" containerID="f76b4ccbb388ccd60c0394fefe98877a36426ab6ef8e783943c13b74e1ff5cac" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.641577 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.645915 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.645928 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5889d0d5-76b2-4fda-8b1f-65309d462a62","Type":"ContainerDied","Data":"92526742a0150f1e1b0d5dd2dbcb1d4da966000173495b4f1396e1a1097796b0"} Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.648952 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-njqgv" event={"ID":"45c66290-f98d-4c93-ac95-b63cd9e0777c","Type":"ContainerStarted","Data":"df5fd9053af2144ab644d9cc231421b4dcd15c4c6fe9470436291ae62e134857"} Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.669172 4903 scope.go:117] "RemoveContainer" containerID="9fc4d9f5c0b64cbe7a4dbfaa84620c3746a34ed22b5e74c5c0cdae2197a0dd00" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.685241 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-njqgv" podStartSLOduration=2.5109307960000002 podStartE2EDuration="9.685226999s" podCreationTimestamp="2025-11-26 22:44:01 +0000 UTC" firstStartedPulling="2025-11-26 22:44:02.453496853 +0000 UTC m=+1371.143731763" lastFinishedPulling="2025-11-26 22:44:09.627793056 +0000 UTC m=+1378.318027966" observedRunningTime="2025-11-26 22:44:10.681979022 +0000 UTC m=+1379.372213932" watchObservedRunningTime="2025-11-26 22:44:10.685226999 +0000 UTC m=+1379.375461909" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.708921 4903 scope.go:117] "RemoveContainer" containerID="dbdf7a938535a4f190cc734bf159cd24a096fb0decb75b167b58ad5864ed9e39" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.715249 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.734896 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.750152 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.766535 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.781665 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: E1126 22:44:10.782269 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-api" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782289 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-api" Nov 26 22:44:10 crc kubenswrapper[4903]: E1126 22:44:10.782302 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-log" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782309 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-log" Nov 26 22:44:10 crc kubenswrapper[4903]: E1126 22:44:10.782323 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" containerName="nova-scheduler-scheduler" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782329 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" 
containerName="nova-scheduler-scheduler" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782605 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-api" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782619 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" containerName="nova-api-log" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.782639 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" containerName="nova-scheduler-scheduler" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.785418 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.804100 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.806940 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.824835 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.826480 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.867783 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.868211 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.870778 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.870990 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.871023 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.871052 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dddg4\" (UniqueName: \"kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.934870 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.949365 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.953076 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.963015 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975056 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975102 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975122 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975140 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dddg4\" (UniqueName: \"kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975205 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzqz5\" (UniqueName: \"kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975242 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.975298 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.999233 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:10 crc kubenswrapper[4903]: I1126 22:44:10.999816 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 
22:44:11.005303 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.005746 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.043541 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dddg4\" (UniqueName: \"kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4\") pod \"nova-api-0\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " pod="openstack/nova-api-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.077128 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.077182 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.077241 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.077310 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.077367 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzqz5\" (UniqueName: \"kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.080886 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.081370 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data\") pod \"nova-scheduler-0\" (UID: 
\"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.097394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzqz5\" (UniqueName: \"kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5\") pod \"nova-scheduler-0\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.117159 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.179798 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.179900 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.182715 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.209548 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.239687 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.250947 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.618186 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.677018 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerStarted","Data":"3ad52ba33a074beb83e8828621048a596367d69d6d23e0e880830d10c251c0fc"} Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.733880 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 22:44:11 crc kubenswrapper[4903]: W1126 22:44:11.743044 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7611e150_3e34_49ba_ab46_f91641af3cb2.slice/crio-7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434 WatchSource:0}: Error finding container 7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434: Status 404 returned error can't find the container with id 7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434 Nov 26 22:44:11 crc kubenswrapper[4903]: I1126 22:44:11.844721 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:11 crc kubenswrapper[4903]: W1126 22:44:11.853725 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod218f4c78_00c2_47c5_aecb_d58e14e73b0c.slice/crio-e22f54b7508958228628f698b1ae04a7d889ff8cbebc52091dac62b779e15236 WatchSource:0}: Error finding container e22f54b7508958228628f698b1ae04a7d889ff8cbebc52091dac62b779e15236: Status 404 returned error can't find the container with id e22f54b7508958228628f698b1ae04a7d889ff8cbebc52091dac62b779e15236 Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.046194 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5889d0d5-76b2-4fda-8b1f-65309d462a62" path="/var/lib/kubelet/pods/5889d0d5-76b2-4fda-8b1f-65309d462a62/volumes" Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.048551 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c1fed03-8447-4f90-8dc0-013a732cb664" path="/var/lib/kubelet/pods/8c1fed03-8447-4f90-8dc0-013a732cb664/volumes" Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.701838 4903 generic.go:334] "Generic (PLEG): container finished" podID="45c66290-f98d-4c93-ac95-b63cd9e0777c" containerID="df5fd9053af2144ab644d9cc231421b4dcd15c4c6fe9470436291ae62e134857" exitCode=0 Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.701970 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-njqgv" event={"ID":"45c66290-f98d-4c93-ac95-b63cd9e0777c","Type":"ContainerDied","Data":"df5fd9053af2144ab644d9cc231421b4dcd15c4c6fe9470436291ae62e134857"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.706494 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"218f4c78-00c2-47c5-aecb-d58e14e73b0c","Type":"ContainerStarted","Data":"b768da7d634e11040926ffd9959a928c6247655a4da938e79d6b0c76f3892d4b"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.706542 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"218f4c78-00c2-47c5-aecb-d58e14e73b0c","Type":"ContainerStarted","Data":"e22f54b7508958228628f698b1ae04a7d889ff8cbebc52091dac62b779e15236"} Nov 26 22:44:12 crc 
kubenswrapper[4903]: I1126 22:44:12.708843 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerStarted","Data":"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.709055 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerStarted","Data":"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.711091 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7611e150-3e34-49ba-ab46-f91641af3cb2","Type":"ContainerStarted","Data":"2f26dcf66f29e8f52abd7be1066c63fd36d928e769177a933ab1f15fa1f9a4d9"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.711124 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7611e150-3e34-49ba-ab46-f91641af3cb2","Type":"ContainerStarted","Data":"7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434"} Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.753593 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.753570097 podStartE2EDuration="2.753570097s" podCreationTimestamp="2025-11-26 22:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:12.737931168 +0000 UTC m=+1381.428166078" watchObservedRunningTime="2025-11-26 22:44:12.753570097 +0000 UTC m=+1381.443805007" Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.769932 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.769913816 podStartE2EDuration="2.769913816s" podCreationTimestamp="2025-11-26 22:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:12.762135377 +0000 UTC m=+1381.452370317" watchObservedRunningTime="2025-11-26 22:44:12.769913816 +0000 UTC m=+1381.460148726" Nov 26 22:44:12 crc kubenswrapper[4903]: I1126 22:44:12.792421 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.792403079 podStartE2EDuration="2.792403079s" podCreationTimestamp="2025-11-26 22:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:12.783714546 +0000 UTC m=+1381.473949466" watchObservedRunningTime="2025-11-26 22:44:12.792403079 +0000 UTC m=+1381.482637989" Nov 26 22:44:13 crc kubenswrapper[4903]: I1126 22:44:13.736438 4903 generic.go:334] "Generic (PLEG): container finished" podID="7611e150-3e34-49ba-ab46-f91641af3cb2" containerID="2f26dcf66f29e8f52abd7be1066c63fd36d928e769177a933ab1f15fa1f9a4d9" exitCode=0 Nov 26 22:44:13 crc kubenswrapper[4903]: I1126 22:44:13.736543 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7611e150-3e34-49ba-ab46-f91641af3cb2","Type":"ContainerDied","Data":"2f26dcf66f29e8f52abd7be1066c63fd36d928e769177a933ab1f15fa1f9a4d9"} Nov 26 22:44:13 crc kubenswrapper[4903]: I1126 22:44:13.772632 4903 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.305864 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.484937 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts\") pod \"45c66290-f98d-4c93-ac95-b63cd9e0777c\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.485203 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data\") pod \"45c66290-f98d-4c93-ac95-b63cd9e0777c\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.485481 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k6kk\" (UniqueName: \"kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk\") pod \"45c66290-f98d-4c93-ac95-b63cd9e0777c\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.485560 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle\") pod \"45c66290-f98d-4c93-ac95-b63cd9e0777c\" (UID: \"45c66290-f98d-4c93-ac95-b63cd9e0777c\") " Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.496491 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts" (OuterVolumeSpecName: "scripts") pod "45c66290-f98d-4c93-ac95-b63cd9e0777c" (UID: "45c66290-f98d-4c93-ac95-b63cd9e0777c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.496512 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk" (OuterVolumeSpecName: "kube-api-access-6k6kk") pod "45c66290-f98d-4c93-ac95-b63cd9e0777c" (UID: "45c66290-f98d-4c93-ac95-b63cd9e0777c"). InnerVolumeSpecName "kube-api-access-6k6kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.521942 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45c66290-f98d-4c93-ac95-b63cd9e0777c" (UID: "45c66290-f98d-4c93-ac95-b63cd9e0777c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.526949 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data" (OuterVolumeSpecName: "config-data") pod "45c66290-f98d-4c93-ac95-b63cd9e0777c" (UID: "45c66290-f98d-4c93-ac95-b63cd9e0777c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.588413 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.588442 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.588454 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k6kk\" (UniqueName: \"kubernetes.io/projected/45c66290-f98d-4c93-ac95-b63cd9e0777c-kube-api-access-6k6kk\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.588472 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c66290-f98d-4c93-ac95-b63cd9e0777c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.747488 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-njqgv" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.752811 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-njqgv" event={"ID":"45c66290-f98d-4c93-ac95-b63cd9e0777c","Type":"ContainerDied","Data":"9f0683ad32227bd8797ffd143ab09f9ed45976b89c2d8fac835cccae3e9a311f"} Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.753131 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f0683ad32227bd8797ffd143ab09f9ed45976b89c2d8fac835cccae3e9a311f" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.970810 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 22:44:14 crc kubenswrapper[4903]: I1126 22:44:14.970893 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.065419 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.207001 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access\") pod \"7611e150-3e34-49ba-ab46-f91641af3cb2\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.207504 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir\") pod \"7611e150-3e34-49ba-ab46-f91641af3cb2\" (UID: \"7611e150-3e34-49ba-ab46-f91641af3cb2\") " Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.210106 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7611e150-3e34-49ba-ab46-f91641af3cb2" (UID: "7611e150-3e34-49ba-ab46-f91641af3cb2"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.212759 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7611e150-3e34-49ba-ab46-f91641af3cb2" (UID: "7611e150-3e34-49ba-ab46-f91641af3cb2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.310652 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7611e150-3e34-49ba-ab46-f91641af3cb2-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.311079 4903 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7611e150-3e34-49ba-ab46-f91641af3cb2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.774479 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7611e150-3e34-49ba-ab46-f91641af3cb2","Type":"ContainerDied","Data":"7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434"} Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.774517 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b1c7995e1ec02adc70dd6f3ee574a39e721f6a9514e4d673e43c51ed098a434" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.774570 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.794880 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 22:44:15 crc kubenswrapper[4903]: E1126 22:44:15.795413 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7611e150-3e34-49ba-ab46-f91641af3cb2" containerName="pruner" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.795431 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7611e150-3e34-49ba-ab46-f91641af3cb2" containerName="pruner" Nov 26 22:44:15 crc kubenswrapper[4903]: E1126 22:44:15.795459 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45c66290-f98d-4c93-ac95-b63cd9e0777c" containerName="aodh-db-sync" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.795468 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="45c66290-f98d-4c93-ac95-b63cd9e0777c" containerName="aodh-db-sync" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.795724 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="45c66290-f98d-4c93-ac95-b63cd9e0777c" containerName="aodh-db-sync" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.795738 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7611e150-3e34-49ba-ab46-f91641af3cb2" containerName="pruner" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.796588 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.798946 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.799037 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.813818 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.925242 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.925318 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.925708 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.991859 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.239:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:15 crc kubenswrapper[4903]: I1126 22:44:15.991872 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.239:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.027722 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.027966 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.028021 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access\") pod \"installer-9-crc\" 
(UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.028474 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.028558 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.045263 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access\") pod \"installer-9-crc\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.127851 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.251799 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.630209 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 22:44:16 crc kubenswrapper[4903]: I1126 22:44:16.791744 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d22f4962-7fe8-4565-92df-3316c71e2079","Type":"ContainerStarted","Data":"fae5c30b01a08b631984a28115d2e1a9d6e35b5b081830ed7151bb47121ac1d2"} Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.254455 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.258661 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.264117 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.264784 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pkbb4" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.267010 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.272118 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.358208 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.358335 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99wml\" (UniqueName: \"kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.358368 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.358395 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.460683 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.460795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99wml\" (UniqueName: \"kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.460817 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.460836 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: 
I1126 22:44:17.469275 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.470576 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.476373 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.488867 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99wml\" (UniqueName: \"kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml\") pod \"aodh-0\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.591952 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:44:17 crc kubenswrapper[4903]: I1126 22:44:17.810551 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d22f4962-7fe8-4565-92df-3316c71e2079","Type":"ContainerStarted","Data":"93194c7804ba81ee6366335707d52c163ec4a460a87469b3e70d1f40135d5ba6"} Nov 26 22:44:18 crc kubenswrapper[4903]: I1126 22:44:18.175292 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=3.175274872 podStartE2EDuration="3.175274872s" podCreationTimestamp="2025-11-26 22:44:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:17.829449686 +0000 UTC m=+1386.519684666" watchObservedRunningTime="2025-11-26 22:44:18.175274872 +0000 UTC m=+1386.865509782" Nov 26 22:44:18 crc kubenswrapper[4903]: I1126 22:44:18.188261 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 22:44:18 crc kubenswrapper[4903]: I1126 22:44:18.831679 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerStarted","Data":"89c2f1c24ae6aa9086bcb22b34860a5b735a035ed4798f4af663f97736404f08"} Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.790242 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.790827 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-central-agent" containerID="cri-o://4636538c68e79519927e1bdd996a1c06be143b6c3a33711ec80082fb835e046c" gracePeriod=30 Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.790899 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="sg-core" 
containerID="cri-o://5fe72152458ad2fc3fb198ee0ff3d0000dae3ae3763f3b2d5035bc605b501131" gracePeriod=30 Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.790940 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-notification-agent" containerID="cri-o://e35851a05f8cf98c55df9dbf6d7af444e0a39f90b8a2688daacb5d9f48067cd6" gracePeriod=30 Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.791014 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="proxy-httpd" containerID="cri-o://283b2c8afa43d21ee775601d8bd97c7c93bc3a6db8a957b602792cb389e98a8f" gracePeriod=30 Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.819523 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 22:44:19 crc kubenswrapper[4903]: I1126 22:44:19.861496 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerStarted","Data":"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5"} Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.469133 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875049 4903 generic.go:334] "Generic (PLEG): container finished" podID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerID="283b2c8afa43d21ee775601d8bd97c7c93bc3a6db8a957b602792cb389e98a8f" exitCode=0 Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875078 4903 generic.go:334] "Generic (PLEG): container finished" podID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerID="5fe72152458ad2fc3fb198ee0ff3d0000dae3ae3763f3b2d5035bc605b501131" exitCode=2 Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875086 4903 generic.go:334] "Generic (PLEG): container finished" podID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerID="4636538c68e79519927e1bdd996a1c06be143b6c3a33711ec80082fb835e046c" exitCode=0 Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875107 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerDied","Data":"283b2c8afa43d21ee775601d8bd97c7c93bc3a6db8a957b602792cb389e98a8f"} Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875133 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerDied","Data":"5fe72152458ad2fc3fb198ee0ff3d0000dae3ae3763f3b2d5035bc605b501131"} Nov 26 22:44:20 crc kubenswrapper[4903]: I1126 22:44:20.875145 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerDied","Data":"4636538c68e79519927e1bdd996a1c06be143b6c3a33711ec80082fb835e046c"} Nov 26 22:44:21 crc kubenswrapper[4903]: I1126 22:44:21.117993 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:44:21 crc kubenswrapper[4903]: I1126 22:44:21.118047 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:44:21 crc 
kubenswrapper[4903]: I1126 22:44:21.251765 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 22:44:21 crc kubenswrapper[4903]: I1126 22:44:21.290783 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 22:44:21 crc kubenswrapper[4903]: I1126 22:44:21.888409 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerStarted","Data":"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba"} Nov 26 22:44:21 crc kubenswrapper[4903]: I1126 22:44:21.918589 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 22:44:22 crc kubenswrapper[4903]: I1126 22:44:22.200896 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.240:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:22 crc kubenswrapper[4903]: I1126 22:44:22.201188 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.240:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:23.916808 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerStarted","Data":"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84"} Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:23.924001 4903 generic.go:334] "Generic (PLEG): container finished" podID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerID="e35851a05f8cf98c55df9dbf6d7af444e0a39f90b8a2688daacb5d9f48067cd6" exitCode=0 Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:23.924039 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerDied","Data":"e35851a05f8cf98c55df9dbf6d7af444e0a39f90b8a2688daacb5d9f48067cd6"} Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.733293 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873405 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8nfw\" (UniqueName: \"kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873485 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873764 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873809 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873828 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873886 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.873915 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle\") pod \"fab088e1-6d09-4778-bc6c-1ee9ef695207\" (UID: \"fab088e1-6d09-4778-bc6c-1ee9ef695207\") " Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.874187 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.874337 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.875042 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.875063 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fab088e1-6d09-4778-bc6c-1ee9ef695207-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.879857 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts" (OuterVolumeSpecName: "scripts") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.880601 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw" (OuterVolumeSpecName: "kube-api-access-q8nfw") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "kube-api-access-q8nfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.907420 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.953129 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerStarted","Data":"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31"} Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.953362 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-api" containerID="cri-o://1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5" gracePeriod=30 Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.953537 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-notifier" containerID="cri-o://a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84" gracePeriod=30 Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.953638 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-listener" containerID="cri-o://f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31" gracePeriod=30 Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.953888 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-evaluator" containerID="cri-o://8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba" gracePeriod=30 Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.957389 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fab088e1-6d09-4778-bc6c-1ee9ef695207","Type":"ContainerDied","Data":"e3b8047a7f8db3545f48296cc9ac8069deaaaffce81df8072617baa4d77b4f6d"} Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.957446 4903 scope.go:117] "RemoveContainer" containerID="283b2c8afa43d21ee775601d8bd97c7c93bc3a6db8a957b602792cb389e98a8f" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.957546 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.989838 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.990897 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.990957 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.990983 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.991001 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8nfw\" (UniqueName: \"kubernetes.io/projected/fab088e1-6d09-4778-bc6c-1ee9ef695207-kube-api-access-q8nfw\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:24 crc kubenswrapper[4903]: I1126 22:44:24.993078 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.003860 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.5926460059999998 podStartE2EDuration="8.003837632s" podCreationTimestamp="2025-11-26 22:44:17 +0000 UTC" firstStartedPulling="2025-11-26 22:44:18.155895031 +0000 UTC m=+1386.846129941" lastFinishedPulling="2025-11-26 22:44:24.567086657 +0000 UTC m=+1393.257321567" observedRunningTime="2025-11-26 22:44:24.978452601 +0000 UTC m=+1393.668687511" watchObservedRunningTime="2025-11-26 22:44:25.003837632 +0000 UTC m=+1393.694072542" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.004122 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.010322 4903 scope.go:117] "RemoveContainer" containerID="5fe72152458ad2fc3fb198ee0ff3d0000dae3ae3763f3b2d5035bc605b501131" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.037508 4903 scope.go:117] "RemoveContainer" containerID="e35851a05f8cf98c55df9dbf6d7af444e0a39f90b8a2688daacb5d9f48067cd6" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.073552 4903 scope.go:117] "RemoveContainer" containerID="4636538c68e79519927e1bdd996a1c06be143b6c3a33711ec80082fb835e046c" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.076580 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data" (OuterVolumeSpecName: "config-data") pod "fab088e1-6d09-4778-bc6c-1ee9ef695207" (UID: "fab088e1-6d09-4778-bc6c-1ee9ef695207"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.093470 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.093632 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fab088e1-6d09-4778-bc6c-1ee9ef695207-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.301604 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.317671 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.326753 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:25 crc kubenswrapper[4903]: E1126 22:44:25.327480 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="proxy-httpd" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327498 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="proxy-httpd" Nov 26 22:44:25 crc kubenswrapper[4903]: E1126 22:44:25.327513 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="sg-core" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327520 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="sg-core" Nov 26 22:44:25 crc kubenswrapper[4903]: E1126 22:44:25.327548 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-central-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327554 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-central-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: E1126 22:44:25.327579 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-notification-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327586 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-notification-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327806 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-notification-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327828 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="ceilometer-central-agent" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327849 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="proxy-httpd" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.327862 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" containerName="sg-core" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.330007 4903 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.337234 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.338055 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.348506 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400230 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400276 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400297 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400321 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400343 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400383 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.400412 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttscs\" (UniqueName: \"kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509061 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 
22:44:25.509103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509127 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509175 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509242 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttscs\" (UniqueName: \"kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.509587 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.514235 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.514474 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.517277 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0" Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.520119 4903 operation_generator.go:637] "MountVolume.SetUp 
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.520119 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0"
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.520153 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0"
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.528117 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttscs\" (UniqueName: \"kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs\") pod \"ceilometer-0\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " pod="openstack/ceilometer-0"
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.666470 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981204 4903 generic.go:334] "Generic (PLEG): container finished" podID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerID="a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84" exitCode=0
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981494 4903 generic.go:334] "Generic (PLEG): container finished" podID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerID="8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba" exitCode=0
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981509 4903 generic.go:334] "Generic (PLEG): container finished" podID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerID="1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5" exitCode=0
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981299 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerDied","Data":"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84"}
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981585 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerDied","Data":"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba"}
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.981628 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerDied","Data":"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5"}
Nov 26 22:44:25 crc kubenswrapper[4903]: I1126 22:44:25.993182 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.043830 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fab088e1-6d09-4778-bc6c-1ee9ef695207" path="/var/lib/kubelet/pods/fab088e1-6d09-4778-bc6c-1ee9ef695207/volumes"
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.165591 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.614542 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.740145 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qtv5\" (UniqueName: \"kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5\") pod \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") "
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.740249 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data\") pod \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") "
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.740269 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle\") pod \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\" (UID: \"df060096-cf5d-46ee-a2c8-8ba582a39e0f\") "
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.745723 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5" (OuterVolumeSpecName: "kube-api-access-7qtv5") pod "df060096-cf5d-46ee-a2c8-8ba582a39e0f" (UID: "df060096-cf5d-46ee-a2c8-8ba582a39e0f"). InnerVolumeSpecName "kube-api-access-7qtv5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.772946 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "df060096-cf5d-46ee-a2c8-8ba582a39e0f" (UID: "df060096-cf5d-46ee-a2c8-8ba582a39e0f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.782228 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data" (OuterVolumeSpecName: "config-data") pod "df060096-cf5d-46ee-a2c8-8ba582a39e0f" (UID: "df060096-cf5d-46ee-a2c8-8ba582a39e0f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.843045 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qtv5\" (UniqueName: \"kubernetes.io/projected/df060096-cf5d-46ee-a2c8-8ba582a39e0f-kube-api-access-7qtv5\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.843081 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.843091 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df060096-cf5d-46ee-a2c8-8ba582a39e0f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.994261 4903 generic.go:334] "Generic (PLEG): container finished" podID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" containerID="0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae" exitCode=137
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.994315 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.994345 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"df060096-cf5d-46ee-a2c8-8ba582a39e0f","Type":"ContainerDied","Data":"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"}
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.994377 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"df060096-cf5d-46ee-a2c8-8ba582a39e0f","Type":"ContainerDied","Data":"38d8c06c67278de08df41d86346f1577a25e708d273054bbea437a4294cd2ad7"}
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.994397 4903 scope.go:117] "RemoveContainer" containerID="0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"
Nov 26 22:44:26 crc kubenswrapper[4903]: I1126 22:44:26.998371 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerStarted","Data":"550b19a920849b328e0bc0096eab57220725c0cdaa240ed00cf8a8d661ade73f"}
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.025866 4903 scope.go:117] "RemoveContainer" containerID="0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"
Nov 26 22:44:27 crc kubenswrapper[4903]: E1126 22:44:27.026371 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae\": container with ID starting with 0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae not found: ID does not exist" containerID="0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.026407 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae"} err="failed to get container status \"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae\": rpc error: code = NotFound desc = could not find container \"0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae\": container with ID starting with 0a32496b7bc91a4ff6f5287b8a32fc51b22647360ab1a31205127065324f81ae not found: ID does not exist"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.032526 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.056602 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.090302 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 22:44:27 crc kubenswrapper[4903]: E1126 22:44:27.091046 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.091062 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.091316 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.092138 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.095893 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.096087 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.096291 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.100036 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.252618 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.252680 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fndx\" (UniqueName: \"kubernetes.io/projected/81026daf-ddcc-4599-8458-b8280d48c920-kube-api-access-9fndx\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.252885 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.252972 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.253123 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.356035 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.356295 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.356376 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.356450 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.356466 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fndx\" (UniqueName: \"kubernetes.io/projected/81026daf-ddcc-4599-8458-b8280d48c920-kube-api-access-9fndx\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.375661 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.382282 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.390366 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.411017 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/81026daf-ddcc-4599-8458-b8280d48c920-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.426271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fndx\" (UniqueName: \"kubernetes.io/projected/81026daf-ddcc-4599-8458-b8280d48c920-kube-api-access-9fndx\") pod \"nova-cell1-novncproxy-0\" (UID: \"81026daf-ddcc-4599-8458-b8280d48c920\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:27 crc kubenswrapper[4903]: I1126 22:44:27.716891 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 22:44:28 crc kubenswrapper[4903]: I1126 22:44:28.026325 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerStarted","Data":"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588"}
Nov 26 22:44:28 crc kubenswrapper[4903]: I1126 22:44:28.026771 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerStarted","Data":"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263"}
Nov 26 22:44:28 crc kubenswrapper[4903]: I1126 22:44:28.057463 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df060096-cf5d-46ee-a2c8-8ba582a39e0f" path="/var/lib/kubelet/pods/df060096-cf5d-46ee-a2c8-8ba582a39e0f/volumes"
Nov 26 22:44:28 crc kubenswrapper[4903]: I1126 22:44:28.218921 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 22:44:29 crc kubenswrapper[4903]: I1126 22:44:29.042253 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerStarted","Data":"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"}
Nov 26 22:44:29 crc kubenswrapper[4903]: I1126 22:44:29.044282 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81026daf-ddcc-4599-8458-b8280d48c920","Type":"ContainerStarted","Data":"1ed338ec05315164532d787da6aba54e317501bbf6e8c2343ff5ad4b77f3cefb"}
Nov 26 22:44:29 crc kubenswrapper[4903]: I1126 22:44:29.044344 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"81026daf-ddcc-4599-8458-b8280d48c920","Type":"ContainerStarted","Data":"93969fe0e66c9fa254a277a6f422725c8b0df90333dc5900b70259ae1e14d569"}
Nov 26 22:44:29 crc kubenswrapper[4903]: I1126 22:44:29.070058 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.070032777 podStartE2EDuration="2.070032777s" podCreationTimestamp="2025-11-26 22:44:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:29.060138982 +0000 UTC m=+1397.750373892" watchObservedRunningTime="2025-11-26 22:44:29.070032777 +0000 UTC m=+1397.760267687"
m=+1397.760267687" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.070912 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerStarted","Data":"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"} Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.071604 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.112216 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.2305621589999998 podStartE2EDuration="6.112196774s" podCreationTimestamp="2025-11-26 22:44:25 +0000 UTC" firstStartedPulling="2025-11-26 22:44:26.235026336 +0000 UTC m=+1394.925261266" lastFinishedPulling="2025-11-26 22:44:30.116660981 +0000 UTC m=+1398.806895881" observedRunningTime="2025-11-26 22:44:31.10456997 +0000 UTC m=+1399.794804880" watchObservedRunningTime="2025-11-26 22:44:31.112196774 +0000 UTC m=+1399.802431684" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.127080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.128214 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.131265 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 22:44:31 crc kubenswrapper[4903]: I1126 22:44:31.134837 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.081978 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.088804 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.326660 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.328989 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.341923 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398035 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398091 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt4vk\" (UniqueName: \"kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398245 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398315 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398362 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.398384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.500730 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.500783 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt4vk\" (UniqueName: \"kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.500913 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.500990 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501018 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501041 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501629 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501822 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501878 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.501898 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.502051 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.536345 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt4vk\" (UniqueName: 
\"kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk\") pod \"dnsmasq-dns-6b7bbf7cf9-9lccp\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.649238 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:32 crc kubenswrapper[4903]: I1126 22:44:32.717927 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:44:33 crc kubenswrapper[4903]: I1126 22:44:33.175458 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.101937 4903 generic.go:334] "Generic (PLEG): container finished" podID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerID="841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb" exitCode=0 Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.102038 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" event={"ID":"ec06e192-e766-4bb7-9c9d-1d2dd8058270","Type":"ContainerDied","Data":"841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb"} Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.102328 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" event={"ID":"ec06e192-e766-4bb7-9c9d-1d2dd8058270","Type":"ContainerStarted","Data":"51e7343869de99bfbe2e7e76495b0a5465045c4a7c22af78901ae0dddcd90aa6"} Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.787785 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.788384 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="proxy-httpd" containerID="cri-o://efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994" gracePeriod=30 Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.788451 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="sg-core" containerID="cri-o://86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff" gracePeriod=30 Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.788335 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-central-agent" containerID="cri-o://317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263" gracePeriod=30 Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.788517 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-notification-agent" containerID="cri-o://99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588" gracePeriod=30 Nov 26 22:44:34 crc kubenswrapper[4903]: I1126 22:44:34.938344 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.113072 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" 
event={"ID":"ec06e192-e766-4bb7-9c9d-1d2dd8058270","Type":"ContainerStarted","Data":"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f"} Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.113320 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116059 4903 generic.go:334] "Generic (PLEG): container finished" podID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerID="efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994" exitCode=0 Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116088 4903 generic.go:334] "Generic (PLEG): container finished" podID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerID="86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff" exitCode=2 Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116127 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerDied","Data":"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"} Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116158 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerDied","Data":"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"} Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116269 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-log" containerID="cri-o://914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7" gracePeriod=30 Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.116306 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-api" containerID="cri-o://290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac" gracePeriod=30 Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.137997 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" podStartSLOduration=3.137978786 podStartE2EDuration="3.137978786s" podCreationTimestamp="2025-11-26 22:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:35.135770617 +0000 UTC m=+1403.826005527" watchObservedRunningTime="2025-11-26 22:44:35.137978786 +0000 UTC m=+1403.828213696" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.639994 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789066 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789156 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789219 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789259 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789855 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.789980 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.790154 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.790183 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.790250 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttscs\" (UniqueName: \"kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs\") pod \"fec2f7d7-23f9-424b-a0db-fc318d09665a\" (UID: \"fec2f7d7-23f9-424b-a0db-fc318d09665a\") " Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.790940 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.790967 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fec2f7d7-23f9-424b-a0db-fc318d09665a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.794376 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts" (OuterVolumeSpecName: "scripts") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.795653 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs" (OuterVolumeSpecName: "kube-api-access-ttscs") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "kube-api-access-ttscs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.839072 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.892586 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.892624 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttscs\" (UniqueName: \"kubernetes.io/projected/fec2f7d7-23f9-424b-a0db-fc318d09665a-kube-api-access-ttscs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.892634 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.909438 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.927787 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data" (OuterVolumeSpecName: "config-data") pod "fec2f7d7-23f9-424b-a0db-fc318d09665a" (UID: "fec2f7d7-23f9-424b-a0db-fc318d09665a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.994395 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:35 crc kubenswrapper[4903]: I1126 22:44:35.994428 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fec2f7d7-23f9-424b-a0db-fc318d09665a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129766 4903 generic.go:334] "Generic (PLEG): container finished" podID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerID="99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588" exitCode=0 Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129796 4903 generic.go:334] "Generic (PLEG): container finished" podID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerID="317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263" exitCode=0 Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129843 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerDied","Data":"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588"} Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129876 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerDied","Data":"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263"} Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"fec2f7d7-23f9-424b-a0db-fc318d09665a","Type":"ContainerDied","Data":"550b19a920849b328e0bc0096eab57220725c0cdaa240ed00cf8a8d661ade73f"} Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.129901 4903 scope.go:117] "RemoveContainer" containerID="efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.130033 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.135005 4903 generic.go:334] "Generic (PLEG): container finished" podID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerID="914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7" exitCode=143 Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.135650 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerDied","Data":"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7"} Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.157536 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.165491 4903 scope.go:117] "RemoveContainer" containerID="86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.196012 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.218756 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.219350 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-notification-agent" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219371 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-notification-agent" Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.219401 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="proxy-httpd" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219410 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="proxy-httpd" Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.219425 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-central-agent" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219432 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-central-agent" Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.219444 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="sg-core" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219450 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="sg-core" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219717 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-notification-agent" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219752 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="ceilometer-central-agent"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.219770 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" containerName="sg-core"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.222098 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.222906 4903 scope.go:117] "RemoveContainer" containerID="99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.227350 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.227520 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.255905 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302155 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302205 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302332 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302364 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302397 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302435 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w8c5\" (UniqueName: \"kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.302455 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.342451 4903 scope.go:117] "RemoveContainer" containerID="317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405509 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405548 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w8c5\" (UniqueName: \"kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405571 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405637 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405663 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.405748 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.406141 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.408319 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.411656 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.413243 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.416954 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.423470 4903 scope.go:117] "RemoveContainer" containerID="efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"
Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.423866 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994\": container with ID starting with efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994 not found: ID does not exist" containerID="efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.423898 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"} err="failed to get container status \"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994\": rpc error: code = NotFound desc = could not find container \"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994\": container with ID starting with efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994 not found: ID does not exist"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.423918 4903 scope.go:117] "RemoveContainer" containerID="86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"
Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.424172 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff\": container with ID starting with 86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff not found: ID does not exist" containerID="86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"
Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.424215 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"} err="failed to get container status \"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff\": rpc error: code = NotFound desc = could not find container
\"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff\": container with ID starting with 86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.424230 4903 scope.go:117] "RemoveContainer" containerID="99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.424516 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w8c5\" (UniqueName: \"kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0" Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.424651 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588\": container with ID starting with 99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588 not found: ID does not exist" containerID="99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.424674 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588"} err="failed to get container status \"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588\": rpc error: code = NotFound desc = could not find container \"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588\": container with ID starting with 99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588 not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.424698 4903 scope.go:117] "RemoveContainer" containerID="317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263" Nov 26 22:44:36 crc kubenswrapper[4903]: E1126 22:44:36.425396 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263\": container with ID starting with 317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263 not found: ID does not exist" containerID="317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425421 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263"} err="failed to get container status \"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263\": rpc error: code = NotFound desc = could not find container \"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263\": container with ID starting with 317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263 not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425433 4903 scope.go:117] "RemoveContainer" containerID="efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425598 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994"} err="failed to get container status \"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994\": rpc 
error: code = NotFound desc = could not find container \"efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994\": container with ID starting with efcfffbe103af9fc6f3cbdc3d34fd2fe439220f5ee3d1eae14df35ec0bb91994 not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425610 4903 scope.go:117] "RemoveContainer" containerID="86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425829 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff"} err="failed to get container status \"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff\": rpc error: code = NotFound desc = could not find container \"86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff\": container with ID starting with 86e5393a270551fd7051acf2584c2dac025018dd27a3b11c07e8f2378ca3b3ff not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.425844 4903 scope.go:117] "RemoveContainer" containerID="99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.426068 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588"} err="failed to get container status \"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588\": rpc error: code = NotFound desc = could not find container \"99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588\": container with ID starting with 99e7a3e6b161c61366d2951bf2985f149da9355c22a3c3b41360647f78178588 not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.426080 4903 scope.go:117] "RemoveContainer" containerID="317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.426273 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263"} err="failed to get container status \"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263\": rpc error: code = NotFound desc = could not find container \"317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263\": container with ID starting with 317e5f446e10af6dca72c092a896217f03e02ef767532b1d67e9df1e41c64263 not found: ID does not exist" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.434820 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") " pod="openstack/ceilometer-0" Nov 26 22:44:36 crc kubenswrapper[4903]: I1126 22:44:36.556070 4903 util.go:30] "No sandbox for pod can be found. 
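Need to start a new one" pod="openstack/ceilometer-0"

The entries above show the kubelet reconciler walking each ceilometer-0 volume through three stages: VerifyControllerAttachedVolume started (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637). Below is a minimal sketch of how one might trace that lifecycle per volume from a capture like this; the file path, regexes, and the entry-splitting heuristic are assumptions about this log format, not kubelet APIs.

#!/usr/bin/env python3
"""Illustrative sketch: reconstruct the per-volume mount lifecycle
(verify -> mount started -> SetUp succeeded) from a kubelet log capture.
Path, regexes, and entry splitting are assumptions, not kubelet APIs."""
import re
import sys
from collections import defaultdict

# A journald entry starts like "Nov 26 22:44:36 crc ..."; split on that so
# the script also copes with captures where entries were joined onto one line.
ENTRY = re.compile(r'(?=[A-Z][a-z]{2} \d{2} \d{2}:\d{2}:\d{2} crc )')
TS = re.compile(r'[IWE]\d{4} (\d{2}:\d{2}:\d{2}\.\d+)')
STAGES = {
    "verify": re.compile(r'VerifyControllerAttachedVolume started for volume \\"([^"\\]+)\\"'),
    "mount": re.compile(r'MountVolume started for volume \\"([^"\\]+)\\"'),
    "setup": re.compile(r'MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\"'),
}

def trace(path):
    timeline = defaultdict(dict)  # volume name -> {stage: first timestamp seen}
    for entry in ENTRY.split(open(path, errors="replace").read()):
        ts = TS.search(entry)
        if not ts:
            continue
        for stage, pat in STAGES.items():
            m = pat.search(entry)
            if m:
                timeline[m.group(1)].setdefault(stage, ts.group(1))
    for vol, stages in sorted(timeline.items()):
        print(f"{vol}: {stages}")

if __name__ == "__main__":
    trace(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log")

Run against this capture, the sketch would show e.g. config-data hitting all three stages within roughly 110 ms (22:44:36.302 -> .405 -> .411).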
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:44:37 crc kubenswrapper[4903]: W1126 22:44:37.010967 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6253c12a_e41a_4476_abda_3b3b7ff084b3.slice/crio-3e9e9e281aeb9e63b16c87ae630152545d40d052a08eb2df12e965bbb2c6776f WatchSource:0}: Error finding container 3e9e9e281aeb9e63b16c87ae630152545d40d052a08eb2df12e965bbb2c6776f: Status 404 returned error can't find the container with id 3e9e9e281aeb9e63b16c87ae630152545d40d052a08eb2df12e965bbb2c6776f Nov 26 22:44:37 crc kubenswrapper[4903]: I1126 22:44:37.020674 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:37 crc kubenswrapper[4903]: I1126 22:44:37.153566 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerStarted","Data":"3e9e9e281aeb9e63b16c87ae630152545d40d052a08eb2df12e965bbb2c6776f"} Nov 26 22:44:37 crc kubenswrapper[4903]: I1126 22:44:37.230622 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:44:37 crc kubenswrapper[4903]: I1126 22:44:37.717673 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:44:37 crc kubenswrapper[4903]: I1126 22:44:37.738422 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.046841 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fec2f7d7-23f9-424b-a0db-fc318d09665a" path="/var/lib/kubelet/pods/fec2f7d7-23f9-424b-a0db-fc318d09665a/volumes" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.169295 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerStarted","Data":"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32"} Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.199683 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.383956 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-jclt8"] Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.385793 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.388401 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.388684 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.402339 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-jclt8"] Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.466408 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.466484 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lscgg\" (UniqueName: \"kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.466525 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.466591 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.568804 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.568876 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lscgg\" (UniqueName: \"kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.568922 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.568987 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.585107 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.585134 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.585168 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.592011 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lscgg\" (UniqueName: \"kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg\") pod \"nova-cell1-cell-mapping-jclt8\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.790746 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:38 crc kubenswrapper[4903]: I1126 22:44:38.947145 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.088972 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs\") pod \"400ca6f8-5d9f-4da1-94ea-658585d32f21\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.089079 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data\") pod \"400ca6f8-5d9f-4da1-94ea-658585d32f21\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.089371 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle\") pod \"400ca6f8-5d9f-4da1-94ea-658585d32f21\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.089393 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dddg4\" (UniqueName: \"kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4\") pod \"400ca6f8-5d9f-4da1-94ea-658585d32f21\" (UID: \"400ca6f8-5d9f-4da1-94ea-658585d32f21\") " Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.091755 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs" (OuterVolumeSpecName: "logs") pod "400ca6f8-5d9f-4da1-94ea-658585d32f21" (UID: "400ca6f8-5d9f-4da1-94ea-658585d32f21"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.104915 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4" (OuterVolumeSpecName: "kube-api-access-dddg4") pod "400ca6f8-5d9f-4da1-94ea-658585d32f21" (UID: "400ca6f8-5d9f-4da1-94ea-658585d32f21"). InnerVolumeSpecName "kube-api-access-dddg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.131814 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "400ca6f8-5d9f-4da1-94ea-658585d32f21" (UID: "400ca6f8-5d9f-4da1-94ea-658585d32f21"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.141732 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data" (OuterVolumeSpecName: "config-data") pod "400ca6f8-5d9f-4da1-94ea-658585d32f21" (UID: "400ca6f8-5d9f-4da1-94ea-658585d32f21"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.187128 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.187181 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerDied","Data":"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac"} Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.187237 4903 scope.go:117] "RemoveContainer" containerID="290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.188909 4903 generic.go:334] "Generic (PLEG): container finished" podID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerID="290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac" exitCode=0 Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.189210 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"400ca6f8-5d9f-4da1-94ea-658585d32f21","Type":"ContainerDied","Data":"3ad52ba33a074beb83e8828621048a596367d69d6d23e0e880830d10c251c0fc"} Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.192310 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.192343 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dddg4\" (UniqueName: \"kubernetes.io/projected/400ca6f8-5d9f-4da1-94ea-658585d32f21-kube-api-access-dddg4\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.192358 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/400ca6f8-5d9f-4da1-94ea-658585d32f21-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.192369 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/400ca6f8-5d9f-4da1-94ea-658585d32f21-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.195667 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerStarted","Data":"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7"} Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.214170 4903 scope.go:117] "RemoveContainer" containerID="914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.243715 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.253313 4903 scope.go:117] "RemoveContainer" containerID="290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac" Nov 26 22:44:39 crc kubenswrapper[4903]: E1126 22:44:39.257373 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac\": container with ID starting with 290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac not found: ID does not exist" containerID="290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.257411 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac"} err="failed to get container status \"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac\": rpc error: code = NotFound desc = could not find container \"290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac\": container with ID starting with 290f6900ade24159f2d5c047b2d7b83e3de31485f75aede21928f14f742fddac not found: ID does not exist" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.257435 4903 scope.go:117] "RemoveContainer" containerID="914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.258469 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:39 crc kubenswrapper[4903]: E1126 22:44:39.258957 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7\": container with ID starting with 914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7 not found: ID does not exist" containerID="914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.258990 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7"} err="failed to get container status \"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7\": rpc error: code = NotFound desc = could not find container \"914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7\": container with ID starting with 914b6bfe9b5f5703737c2612d6a3adbd764425a70140f642413306a9ccff16c7 not found: ID does not exist" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.273549 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:39 crc kubenswrapper[4903]: E1126 22:44:39.274088 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-log" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.274104 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-log" Nov 26 22:44:39 crc kubenswrapper[4903]: E1126 22:44:39.274145 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-api" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.274152 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-api" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.274346 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-api" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.274374 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" containerName="nova-api-log" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.275567 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.281861 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.282565 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.286803 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.302157 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.376988 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-jclt8"] Nov 26 22:44:39 crc kubenswrapper[4903]: W1126 22:44:39.377831 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdfbae1a_cfe8_4496_8e4d_2b4255dd34b6.slice/crio-e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17 WatchSource:0}: Error finding container e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17: Status 404 returned error can't find the container with id e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17 Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409071 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409167 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409219 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409261 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409290 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v7js\" (UniqueName: \"kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.409423 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle\") pod \"nova-api-0\" 
(UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528157 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528453 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528490 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528533 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528565 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v7js\" (UniqueName: \"kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.528616 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.546011 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.546666 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.547323 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.547876 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 
22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.564257 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.584285 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v7js\" (UniqueName: \"kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js\") pod \"nova-api-0\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " pod="openstack/nova-api-0" Nov 26 22:44:39 crc kubenswrapper[4903]: I1126 22:44:39.618275 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.042818 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="400ca6f8-5d9f-4da1-94ea-658585d32f21" path="/var/lib/kubelet/pods/400ca6f8-5d9f-4da1-94ea-658585d32f21/volumes" Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.154833 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.217572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerStarted","Data":"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d"} Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.219067 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jclt8" event={"ID":"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6","Type":"ContainerStarted","Data":"b4acb01676addc4a31a9a6154c8702b0e59ee8687425da49a5197dca79d739e3"} Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.219110 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jclt8" event={"ID":"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6","Type":"ContainerStarted","Data":"e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17"} Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.222024 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerStarted","Data":"ff1758dff4d1b664af3867b2234eb7da34044b2b916ddf11b180e2a26ebd2102"} Nov 26 22:44:40 crc kubenswrapper[4903]: I1126 22:44:40.253174 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-jclt8" podStartSLOduration=2.253155379 podStartE2EDuration="2.253155379s" podCreationTimestamp="2025-11-26 22:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:40.232148105 +0000 UTC m=+1408.922383015" watchObservedRunningTime="2025-11-26 22:44:40.253155379 +0000 UTC m=+1408.943390289" Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234363 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerStarted","Data":"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0"} Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234469 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-central-agent" containerID="cri-o://56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32" gracePeriod=30 Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234512 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="proxy-httpd" containerID="cri-o://69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0" gracePeriod=30 Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234600 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-notification-agent" containerID="cri-o://6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7" gracePeriod=30 Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234613 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="sg-core" containerID="cri-o://94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d" gracePeriod=30 Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.234828 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.240265 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerStarted","Data":"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7"} Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.240300 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerStarted","Data":"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697"} Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.261589 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.269425007 podStartE2EDuration="5.261573047s" podCreationTimestamp="2025-11-26 22:44:36 +0000 UTC" firstStartedPulling="2025-11-26 22:44:37.01378707 +0000 UTC m=+1405.704022010" lastFinishedPulling="2025-11-26 22:44:41.00593514 +0000 UTC m=+1409.696170050" observedRunningTime="2025-11-26 22:44:41.255419201 +0000 UTC m=+1409.945654161" watchObservedRunningTime="2025-11-26 22:44:41.261573047 +0000 UTC m=+1409.951807957" Nov 26 22:44:41 crc kubenswrapper[4903]: I1126 22:44:41.283466 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.2834464629999998 podStartE2EDuration="2.283446463s" podCreationTimestamp="2025-11-26 22:44:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:41.277614857 +0000 UTC m=+1409.967849767" watchObservedRunningTime="2025-11-26 22:44:41.283446463 +0000 UTC m=+1409.973681373" Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.255625 4903 generic.go:334] "Generic (PLEG): container finished" podID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerID="94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d" exitCode=2 Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.256654 4903 generic.go:334] "Generic (PLEG): container finished" 
podID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerID="6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7" exitCode=0 Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.256537 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerDied","Data":"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d"} Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.256957 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerDied","Data":"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7"} Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.651236 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.794164 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:44:42 crc kubenswrapper[4903]: I1126 22:44:42.794720 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="dnsmasq-dns" containerID="cri-o://afeec996aa8e6ffd5b6d9105ee3f55b83353f9e96e6f138bc66bbb84aaf36703" gracePeriod=10 Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.270796 4903 generic.go:334] "Generic (PLEG): container finished" podID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerID="afeec996aa8e6ffd5b6d9105ee3f55b83353f9e96e6f138bc66bbb84aaf36703" exitCode=0 Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.270848 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" event={"ID":"96696972-c8ae-4828-b9d4-78825bd31e3f","Type":"ContainerDied","Data":"afeec996aa8e6ffd5b6d9105ee3f55b83353f9e96e6f138bc66bbb84aaf36703"} Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.469560 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537579 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537626 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537740 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537923 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537967 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzj9z\" (UniqueName: \"kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.537991 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config\") pod \"96696972-c8ae-4828-b9d4-78825bd31e3f\" (UID: \"96696972-c8ae-4828-b9d4-78825bd31e3f\") " Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.546672 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z" (OuterVolumeSpecName: "kube-api-access-lzj9z") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "kube-api-access-lzj9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.609646 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config" (OuterVolumeSpecName: "config") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.614066 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.623122 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.625135 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.629009 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "96696972-c8ae-4828-b9d4-78825bd31e3f" (UID: "96696972-c8ae-4828-b9d4-78825bd31e3f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640307 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640346 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzj9z\" (UniqueName: \"kubernetes.io/projected/96696972-c8ae-4828-b9d4-78825bd31e3f-kube-api-access-lzj9z\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640356 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640365 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640375 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:43 crc kubenswrapper[4903]: I1126 22:44:43.640384 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96696972-c8ae-4828-b9d4-78825bd31e3f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.286468 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" event={"ID":"96696972-c8ae-4828-b9d4-78825bd31e3f","Type":"ContainerDied","Data":"afd241d71703b7175c29b6dee20b74998db70415a0dd4913afed914d4775b14e"} Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.286543 4903 scope.go:117] "RemoveContainer" containerID="afeec996aa8e6ffd5b6d9105ee3f55b83353f9e96e6f138bc66bbb84aaf36703" Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.286559 
4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-7w8wv" Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.325278 4903 scope.go:117] "RemoveContainer" containerID="007987fee0d2a4a81f72f22648f253a11a86304f7bbdd1ab772216c4a3eb6b9d" Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.352325 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:44:44 crc kubenswrapper[4903]: I1126 22:44:44.370846 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-7w8wv"] Nov 26 22:44:45 crc kubenswrapper[4903]: I1126 22:44:45.303013 4903 generic.go:334] "Generic (PLEG): container finished" podID="fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" containerID="b4acb01676addc4a31a9a6154c8702b0e59ee8687425da49a5197dca79d739e3" exitCode=0 Nov 26 22:44:45 crc kubenswrapper[4903]: I1126 22:44:45.303136 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jclt8" event={"ID":"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6","Type":"ContainerDied","Data":"b4acb01676addc4a31a9a6154c8702b0e59ee8687425da49a5197dca79d739e3"} Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.045128 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" path="/var/lib/kubelet/pods/96696972-c8ae-4828-b9d4-78825bd31e3f/volumes" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.323752 4903 generic.go:334] "Generic (PLEG): container finished" podID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerID="56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32" exitCode=0 Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.323824 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerDied","Data":"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32"} Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.753143 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.819012 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data\") pod \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.819243 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts\") pod \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.819499 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lscgg\" (UniqueName: \"kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg\") pod \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.819634 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle\") pod \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\" (UID: \"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6\") " Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.844929 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts" (OuterVolumeSpecName: "scripts") pod "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" (UID: "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.845317 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg" (OuterVolumeSpecName: "kube-api-access-lscgg") pod "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" (UID: "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6"). InnerVolumeSpecName "kube-api-access-lscgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.859937 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data" (OuterVolumeSpecName: "config-data") pod "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" (UID: "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.865111 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" (UID: "fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.922426 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.922578 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lscgg\" (UniqueName: \"kubernetes.io/projected/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-kube-api-access-lscgg\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.922707 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:46 crc kubenswrapper[4903]: I1126 22:44:46.922806 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.347532 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jclt8" event={"ID":"fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6","Type":"ContainerDied","Data":"e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17"} Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.348041 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e30cf0f4ecc38696cdfaf7d934cf49ca0b5f19120c0a87b6b533a0da647c5a17" Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.348163 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jclt8" Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.554763 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.555133 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" containerName="nova-scheduler-scheduler" containerID="cri-o://b768da7d634e11040926ffd9959a928c6247655a4da938e79d6b0c76f3892d4b" gracePeriod=30 Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.567132 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.567376 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-log" containerID="cri-o://da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" gracePeriod=30 Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.567494 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-api" containerID="cri-o://97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" gracePeriod=30 Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.635153 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.635410 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" 
containerName="nova-metadata-log" containerID="cri-o://4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f" gracePeriod=30 Nov 26 22:44:47 crc kubenswrapper[4903]: I1126 22:44:47.635544 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" containerID="cri-o://4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575" gracePeriod=30 Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.240885 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361521 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361633 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361686 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361778 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361807 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v7js\" (UniqueName: \"kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.361853 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs\") pod \"cc716136-3185-4571-83e6-7c51a65bff4a\" (UID: \"cc716136-3185-4571-83e6-7c51a65bff4a\") " Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.362842 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs" (OuterVolumeSpecName: "logs") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.367356 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js" (OuterVolumeSpecName: "kube-api-access-6v7js") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "kube-api-access-6v7js". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.372926 4903 generic.go:334] "Generic (PLEG): container finished" podID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerID="4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f" exitCode=143 Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.372985 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerDied","Data":"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f"} Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377225 4903 generic.go:334] "Generic (PLEG): container finished" podID="cc716136-3185-4571-83e6-7c51a65bff4a" containerID="97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" exitCode=0 Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377244 4903 generic.go:334] "Generic (PLEG): container finished" podID="cc716136-3185-4571-83e6-7c51a65bff4a" containerID="da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" exitCode=143 Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377258 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerDied","Data":"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7"} Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377273 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerDied","Data":"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697"} Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377283 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc716136-3185-4571-83e6-7c51a65bff4a","Type":"ContainerDied","Data":"ff1758dff4d1b664af3867b2234eb7da34044b2b916ddf11b180e2a26ebd2102"} Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377297 4903 scope.go:117] "RemoveContainer" containerID="97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.377296 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.397826 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.411048 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data" (OuterVolumeSpecName: "config-data") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.411266 4903 scope.go:117] "RemoveContainer" containerID="da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.434918 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.439369 4903 scope.go:117] "RemoveContainer" containerID="97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.439756 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7\": container with ID starting with 97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7 not found: ID does not exist" containerID="97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.439802 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7"} err="failed to get container status \"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7\": rpc error: code = NotFound desc = could not find container \"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7\": container with ID starting with 97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7 not found: ID does not exist" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.439828 4903 scope.go:117] "RemoveContainer" containerID="da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.440228 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697\": container with ID starting with da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697 not found: ID does not exist" containerID="da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.440251 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697"} err="failed to get container status \"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697\": rpc error: code = NotFound desc = could not find container \"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697\": container with ID starting with da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697 not 
found: ID does not exist" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.440265 4903 scope.go:117] "RemoveContainer" containerID="97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.440640 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7"} err="failed to get container status \"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7\": rpc error: code = NotFound desc = could not find container \"97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7\": container with ID starting with 97f60bf57ad409f5c67757adb94272c47f096ef860cf62e76ce55da16a7abde7 not found: ID does not exist" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.440672 4903 scope.go:117] "RemoveContainer" containerID="da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.440958 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697"} err="failed to get container status \"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697\": rpc error: code = NotFound desc = could not find container \"da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697\": container with ID starting with da1b6031f44a30125574cf3bb344c5143a3863279b2661f649acf6e9d74d2697 not found: ID does not exist" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.447328 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cc716136-3185-4571-83e6-7c51a65bff4a" (UID: "cc716136-3185-4571-83e6-7c51a65bff4a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465347 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465438 4903 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465493 4903 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465547 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc716136-3185-4571-83e6-7c51a65bff4a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465604 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v7js\" (UniqueName: \"kubernetes.io/projected/cc716136-3185-4571-83e6-7c51a65bff4a-kube-api-access-6v7js\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.465657 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc716136-3185-4571-83e6-7c51a65bff4a-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.778784 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.807270 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.825960 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.826812 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-log" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.826840 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-log" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.826855 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-api" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.826864 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-api" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.826896 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="init" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.826903 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="init" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.826927 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="dnsmasq-dns" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.826935 4903 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="dnsmasq-dns" Nov 26 22:44:48 crc kubenswrapper[4903]: E1126 22:44:48.826970 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" containerName="nova-manage" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.826978 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" containerName="nova-manage" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.831007 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" containerName="nova-manage" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.831050 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-api" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.831073 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" containerName="nova-api-log" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.831084 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="96696972-c8ae-4828-b9d4-78825bd31e3f" containerName="dnsmasq-dns" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.835196 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.835300 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.838007 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.838084 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.839151 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.977133 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-public-tls-certs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.977199 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfxpf\" (UniqueName: \"kubernetes.io/projected/d23a575d-55d9-4805-bfee-09f92b0b97ef-kube-api-access-tfxpf\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.977653 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.977798 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.977997 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d23a575d-55d9-4805-bfee-09f92b0b97ef-logs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:48 crc kubenswrapper[4903]: I1126 22:44:48.978108 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-config-data\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080135 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080201 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080283 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d23a575d-55d9-4805-bfee-09f92b0b97ef-logs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080336 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-config-data\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080385 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-public-tls-certs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.080426 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfxpf\" (UniqueName: \"kubernetes.io/projected/d23a575d-55d9-4805-bfee-09f92b0b97ef-kube-api-access-tfxpf\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.081379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d23a575d-55d9-4805-bfee-09f92b0b97ef-logs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.096509 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " 
pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.099139 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.105647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-config-data\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.106326 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d23a575d-55d9-4805-bfee-09f92b0b97ef-public-tls-certs\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.153830 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfxpf\" (UniqueName: \"kubernetes.io/projected/d23a575d-55d9-4805-bfee-09f92b0b97ef-kube-api-access-tfxpf\") pod \"nova-api-0\" (UID: \"d23a575d-55d9-4805-bfee-09f92b0b97ef\") " pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.453189 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 22:44:49 crc kubenswrapper[4903]: I1126 22:44:49.964621 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 22:44:49 crc kubenswrapper[4903]: W1126 22:44:49.981601 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd23a575d_55d9_4805_bfee_09f92b0b97ef.slice/crio-05129e1701f8307b303e5a1b90a2966a9a149d555d50a7d2859fc7b3d62d1833 WatchSource:0}: Error finding container 05129e1701f8307b303e5a1b90a2966a9a149d555d50a7d2859fc7b3d62d1833: Status 404 returned error can't find the container with id 05129e1701f8307b303e5a1b90a2966a9a149d555d50a7d2859fc7b3d62d1833 Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.045590 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc716136-3185-4571-83e6-7c51a65bff4a" path="/var/lib/kubelet/pods/cc716136-3185-4571-83e6-7c51a65bff4a/volumes" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.420590 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d23a575d-55d9-4805-bfee-09f92b0b97ef","Type":"ContainerStarted","Data":"8025d49bddbbbc61605ef36447fa32488815844775e52455dbf97be8f0392420"} Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.420876 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d23a575d-55d9-4805-bfee-09f92b0b97ef","Type":"ContainerStarted","Data":"05129e1701f8307b303e5a1b90a2966a9a149d555d50a7d2859fc7b3d62d1833"} Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.454782 4903 generic.go:334] "Generic (PLEG): container finished" podID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" containerID="b768da7d634e11040926ffd9959a928c6247655a4da938e79d6b0c76f3892d4b" exitCode=0 Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.454828 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"218f4c78-00c2-47c5-aecb-d58e14e73b0c","Type":"ContainerDied","Data":"b768da7d634e11040926ffd9959a928c6247655a4da938e79d6b0c76f3892d4b"} Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.745590 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.831023 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.239:8775/\": read tcp 10.217.0.2:58134->10.217.0.239:8775: read: connection reset by peer" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.830997 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.239:8775/\": read tcp 10.217.0.2:58142->10.217.0.239:8775: read: connection reset by peer" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.832047 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle\") pod \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.832238 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data\") pod \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.832295 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzqz5\" (UniqueName: \"kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5\") pod \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\" (UID: \"218f4c78-00c2-47c5-aecb-d58e14e73b0c\") " Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.843853 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5" (OuterVolumeSpecName: "kube-api-access-lzqz5") pod "218f4c78-00c2-47c5-aecb-d58e14e73b0c" (UID: "218f4c78-00c2-47c5-aecb-d58e14e73b0c"). InnerVolumeSpecName "kube-api-access-lzqz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.876849 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data" (OuterVolumeSpecName: "config-data") pod "218f4c78-00c2-47c5-aecb-d58e14e73b0c" (UID: "218f4c78-00c2-47c5-aecb-d58e14e73b0c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.890832 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "218f4c78-00c2-47c5-aecb-d58e14e73b0c" (UID: "218f4c78-00c2-47c5-aecb-d58e14e73b0c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.935148 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.935186 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/218f4c78-00c2-47c5-aecb-d58e14e73b0c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:50 crc kubenswrapper[4903]: I1126 22:44:50.935196 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzqz5\" (UniqueName: \"kubernetes.io/projected/218f4c78-00c2-47c5-aecb-d58e14e73b0c-kube-api-access-lzqz5\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.314058 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.450211 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data\") pod \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.450395 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs\") pod \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.450588 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs\") pod \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.450621 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle\") pod \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.450651 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj7n8\" (UniqueName: \"kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8\") pod \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\" (UID: \"bc6693f3-abb4-4ac9-9535-ff92ff9a740e\") " Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.451489 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs" (OuterVolumeSpecName: "logs") pod "bc6693f3-abb4-4ac9-9535-ff92ff9a740e" (UID: "bc6693f3-abb4-4ac9-9535-ff92ff9a740e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.454146 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8" (OuterVolumeSpecName: "kube-api-access-pj7n8") pod "bc6693f3-abb4-4ac9-9535-ff92ff9a740e" (UID: "bc6693f3-abb4-4ac9-9535-ff92ff9a740e"). InnerVolumeSpecName "kube-api-access-pj7n8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.474244 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.474730 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"218f4c78-00c2-47c5-aecb-d58e14e73b0c","Type":"ContainerDied","Data":"e22f54b7508958228628f698b1ae04a7d889ff8cbebc52091dac62b779e15236"} Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.474796 4903 scope.go:117] "RemoveContainer" containerID="b768da7d634e11040926ffd9959a928c6247655a4da938e79d6b0c76f3892d4b" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.478533 4903 generic.go:334] "Generic (PLEG): container finished" podID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerID="4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575" exitCode=0 Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.478659 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerDied","Data":"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575"} Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.478739 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"bc6693f3-abb4-4ac9-9535-ff92ff9a740e","Type":"ContainerDied","Data":"17a545282f76d7fb261e86179768215072e0ce65d5bfc651c0d32d5ffe7770da"} Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.478879 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.494182 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d23a575d-55d9-4805-bfee-09f92b0b97ef","Type":"ContainerStarted","Data":"890b7e782b5c6f76b032da17a764b89464675289eded1430c4569cf6c0c9465a"} Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.515243 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.5152285770000002 podStartE2EDuration="3.515228577s" podCreationTimestamp="2025-11-26 22:44:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:51.511622651 +0000 UTC m=+1420.201857561" watchObservedRunningTime="2025-11-26 22:44:51.515228577 +0000 UTC m=+1420.205463487" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.526389 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data" (OuterVolumeSpecName: "config-data") pod "bc6693f3-abb4-4ac9-9535-ff92ff9a740e" (UID: "bc6693f3-abb4-4ac9-9535-ff92ff9a740e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.531986 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc6693f3-abb4-4ac9-9535-ff92ff9a740e" (UID: "bc6693f3-abb4-4ac9-9535-ff92ff9a740e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.549848 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "bc6693f3-abb4-4ac9-9535-ff92ff9a740e" (UID: "bc6693f3-abb4-4ac9-9535-ff92ff9a740e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.554339 4903 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.554390 4903 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-logs\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.554410 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.554428 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj7n8\" (UniqueName: \"kubernetes.io/projected/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-kube-api-access-pj7n8\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.554447 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc6693f3-abb4-4ac9-9535-ff92ff9a740e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.611448 4903 scope.go:117] "RemoveContainer" containerID="4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.619579 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.635126 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.645602 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: E1126 22:44:51.646175 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646193 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" Nov 26 22:44:51 crc kubenswrapper[4903]: E1126 22:44:51.646212 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" 
containerName="nova-scheduler-scheduler" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646218 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" containerName="nova-scheduler-scheduler" Nov 26 22:44:51 crc kubenswrapper[4903]: E1126 22:44:51.646251 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-log" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646257 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-log" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646470 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-log" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646498 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" containerName="nova-metadata-metadata" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.646515 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" containerName="nova-scheduler-scheduler" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.647342 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.651148 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.654564 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.659635 4903 scope.go:117] "RemoveContainer" containerID="4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.688887 4903 scope.go:117] "RemoveContainer" containerID="4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575" Nov 26 22:44:51 crc kubenswrapper[4903]: E1126 22:44:51.689405 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575\": container with ID starting with 4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575 not found: ID does not exist" containerID="4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.689455 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575"} err="failed to get container status \"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575\": rpc error: code = NotFound desc = could not find container \"4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575\": container with ID starting with 4d99fa99533ede36bb177771d6a316669901e8b185dc23c2f02108972c26f575 not found: ID does not exist" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.689489 4903 scope.go:117] "RemoveContainer" containerID="4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f" Nov 26 22:44:51 crc kubenswrapper[4903]: E1126 22:44:51.690074 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f\": container with ID starting with 4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f not found: ID does not exist" containerID="4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.690135 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f"} err="failed to get container status \"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f\": rpc error: code = NotFound desc = could not find container \"4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f\": container with ID starting with 4ebc1924b227f6508b5bdd8b62bb24cba6e5bdbd2e5a87156b89818722ee6c5f not found: ID does not exist" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.761488 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv6l7\" (UniqueName: \"kubernetes.io/projected/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-kube-api-access-nv6l7\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.761968 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.762040 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-config-data\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.835291 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.854494 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.864123 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.864174 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-config-data\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.864291 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv6l7\" (UniqueName: \"kubernetes.io/projected/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-kube-api-access-nv6l7\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.865773 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-metadata-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.868725 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.870584 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.874023 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.874513 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.874767 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.881636 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-config-data\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.888112 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv6l7\" (UniqueName: \"kubernetes.io/projected/646d5a7a-f188-4dc2-99ac-24c16bcf59fc-kube-api-access-nv6l7\") pod \"nova-scheduler-0\" (UID: \"646d5a7a-f188-4dc2-99ac-24c16bcf59fc\") " pod="openstack/nova-scheduler-0" Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.965670 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.966328 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6g25\" (UniqueName: \"kubernetes.io/projected/ecafb017-7ef9-492e-95d5-d297ec3c9725-kube-api-access-v6g25\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.966403 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.967430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecafb017-7ef9-492e-95d5-d297ec3c9725-logs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.967831 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-config-data\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:51 crc kubenswrapper[4903]: I1126 22:44:51.967949 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.056257 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="218f4c78-00c2-47c5-aecb-d58e14e73b0c" path="/var/lib/kubelet/pods/218f4c78-00c2-47c5-aecb-d58e14e73b0c/volumes"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.058333 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc6693f3-abb4-4ac9-9535-ff92ff9a740e" path="/var/lib/kubelet/pods/bc6693f3-abb4-4ac9-9535-ff92ff9a740e/volumes"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.070785 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecafb017-7ef9-492e-95d5-d297ec3c9725-logs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.070965 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-config-data\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.071025 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.071112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6g25\" (UniqueName: \"kubernetes.io/projected/ecafb017-7ef9-492e-95d5-d297ec3c9725-kube-api-access-v6g25\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.071145 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.071378 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecafb017-7ef9-492e-95d5-d297ec3c9725-logs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.079030 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-config-data\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.081246 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.089363 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecafb017-7ef9-492e-95d5-d297ec3c9725-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.093988 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6g25\" (UniqueName: \"kubernetes.io/projected/ecafb017-7ef9-492e-95d5-d297ec3c9725-kube-api-access-v6g25\") pod \"nova-metadata-0\" (UID: \"ecafb017-7ef9-492e-95d5-d297ec3c9725\") " pod="openstack/nova-metadata-0"
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.198955 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
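The block above is one full pass of the volume reconciler for nova-metadata-0: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded for each of the five volumes. A minimal sketch, assuming Go and only the standard library, of pulling the succeeded mounts out of a log shaped like this (the regexp and the file name mountgrep.go are illustrative, not part of the kubelet):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches lines like: "MountVolume.SetUp succeeded for volume \"config-data\" ... pod="openstack/nova-metadata-0"
	// (the quotes around the volume name are backslash-escaped in the raw log).
	re := regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\".*pod="([^"]+)"`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet entries can be very long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%s mounted %s\n", m[2], m[1])
		}
	}
}

Run as, e.g., go run mountgrep.go < kubelet.log to list each pod/volume pair that reached SetUp.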
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.487795 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.505270 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"646d5a7a-f188-4dc2-99ac-24c16bcf59fc","Type":"ContainerStarted","Data":"11aef774f01fb9e1ec480002d476a9d113477ba90182c7ac0c335105fba742a9"}
Nov 26 22:44:52 crc kubenswrapper[4903]: W1126 22:44:52.768571 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecafb017_7ef9_492e_95d5_d297ec3c9725.slice/crio-510e5f19bedae553bbbedf55da05bc26efa4357802cf1d5651cd5d4f0189190e WatchSource:0}: Error finding container 510e5f19bedae553bbbedf55da05bc26efa4357802cf1d5651cd5d4f0189190e: Status 404 returned error can't find the container with id 510e5f19bedae553bbbedf55da05bc26efa4357802cf1d5651cd5d4f0189190e
Nov 26 22:44:52 crc kubenswrapper[4903]: I1126 22:44:52.770120 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.526423 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecafb017-7ef9-492e-95d5-d297ec3c9725","Type":"ContainerStarted","Data":"220fded0ee6a5c1cf82606896207535e054f5920af73cf5d28e8b75c4c763b1f"}
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.526491 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecafb017-7ef9-492e-95d5-d297ec3c9725","Type":"ContainerStarted","Data":"e5c45d4c265a9aa5cc5d2b3ff2d43ea027cb7da876c975536c84f420cf9c3406"}
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.526512 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecafb017-7ef9-492e-95d5-d297ec3c9725","Type":"ContainerStarted","Data":"510e5f19bedae553bbbedf55da05bc26efa4357802cf1d5651cd5d4f0189190e"}
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.529453 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"646d5a7a-f188-4dc2-99ac-24c16bcf59fc","Type":"ContainerStarted","Data":"e8b0f6d6c807fbe5cbe420199c808424fe982863cc18eb433ff8c1171e33d20f"}
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.548268 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.548250738 podStartE2EDuration="2.548250738s" podCreationTimestamp="2025-11-26 22:44:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:53.543664555 +0000 UTC m=+1422.233899475" watchObservedRunningTime="2025-11-26 22:44:53.548250738 +0000 UTC m=+1422.238485648"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.574423 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.57440524 podStartE2EDuration="2.57440524s" podCreationTimestamp="2025-11-26 22:44:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:44:53.566413345 +0000 UTC m=+1422.256648285" watchObservedRunningTime="2025-11-26 22:44:53.57440524 +0000 UTC m=+1422.264640150"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.626411 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"]
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.633373 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.647636 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"]
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.727737 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q8jn\" (UniqueName: \"kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.727925 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.727990 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.830269 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.830329 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.830476 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q8jn\" (UniqueName: \"kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.830775 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.831133 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.871478 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q8jn\" (UniqueName: \"kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn\") pod \"redhat-operators-bxpxf\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:53 crc kubenswrapper[4903]: I1126 22:44:53.956519 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bxpxf"
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.469497 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"]
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.583033 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerStarted","Data":"bfe64ce7a419cacd55bd20288a4464a6d51ae4095f8770980580b0a4a9e65d89"}
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.703387 4903 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.704961 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705434 4903 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705811 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43" gracePeriod=15
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705850 4903 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705885 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800" gracePeriod=15
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705950 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238" gracePeriod=15
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.705838 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43" gracePeriod=15
Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706253 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" gracePeriod=15
period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" gracePeriod=15 Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706596 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706617 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706672 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706685 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706747 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706760 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706785 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706796 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706821 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706832 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706846 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706858 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.706872 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.706884 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707294 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707330 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-cert-regeneration-controller" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707357 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707376 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707389 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.707410 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.861974 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862305 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862351 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862426 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862461 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862501 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862573 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" 
(UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.862615 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: E1126 22:44:54.874119 4903 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.965805 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.966678 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.966993 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967058 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967133 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967060 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967181 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: 
\"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967220 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967265 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967306 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967475 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967600 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967621 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967655 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.967997 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:54 crc kubenswrapper[4903]: I1126 22:44:54.968112 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.174985 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:55 crc kubenswrapper[4903]: E1126 22:44:55.264642 4903 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.219:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187bafe938eea5b4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,LastTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.453731 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.454791 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.455018 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.586262 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle\") pod \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.588017 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data\") pod \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.588135 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99wml\" (UniqueName: \"kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml\") pod \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.588191 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts\") pod \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\" (UID: \"05d39c3e-b34f-42ae-ad74-b18e1fd0fced\") " Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.591401 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml" (OuterVolumeSpecName: "kube-api-access-99wml") pod "05d39c3e-b34f-42ae-ad74-b18e1fd0fced" (UID: "05d39c3e-b34f-42ae-ad74-b18e1fd0fced"). InnerVolumeSpecName "kube-api-access-99wml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.592832 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts" (OuterVolumeSpecName: "scripts") pod "05d39c3e-b34f-42ae-ad74-b18e1fd0fced" (UID: "05d39c3e-b34f-42ae-ad74-b18e1fd0fced"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.600065 4903 generic.go:334] "Generic (PLEG): container finished" podID="d22f4962-7fe8-4565-92df-3316c71e2079" containerID="93194c7804ba81ee6366335707d52c163ec4a460a87469b3e70d1f40135d5ba6" exitCode=0 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.600116 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d22f4962-7fe8-4565-92df-3316c71e2079","Type":"ContainerDied","Data":"93194c7804ba81ee6366335707d52c163ec4a460a87469b3e70d1f40135d5ba6"} Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.601115 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.601526 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.601924 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.603143 4903 generic.go:334] "Generic (PLEG): container finished" podID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerID="f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31" exitCode=137 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.603166 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.603191 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerDied","Data":"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31"} Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.604020 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"05d39c3e-b34f-42ae-ad74-b18e1fd0fced","Type":"ContainerDied","Data":"89c2f1c24ae6aa9086bcb22b34860a5b735a035ed4798f4af663f97736404f08"} Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.604038 4903 scope.go:117] "RemoveContainer" containerID="f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.604510 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.604724 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.604970 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.611870 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.622767 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.623489 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238" exitCode=0 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.623517 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43" exitCode=0 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.623528 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" exitCode=0 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.623536 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800" exitCode=2 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.624962 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f4ff8b18c8f3fb6a5d7ded6ab24556dae2c55485ce970d68cb743ff12aec18ed"} Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.626805 4903 generic.go:334] "Generic (PLEG): container finished" podID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerID="98873735f433a17dae345725586347dd37ad1acefe9ea956f5660e6dc8b718b2" exitCode=0 Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.626831 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerDied","Data":"98873735f433a17dae345725586347dd37ad1acefe9ea956f5660e6dc8b718b2"} Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.629860 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.631459 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.631997 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.632449 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.638928 4903 scope.go:117] "RemoveContainer" containerID="a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.674849 4903 scope.go:117] "RemoveContainer" containerID="8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.691726 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99wml\" (UniqueName: \"kubernetes.io/projected/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-kube-api-access-99wml\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.691761 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.698161 4903 scope.go:117] "RemoveContainer" containerID="1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.715853 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data" (OuterVolumeSpecName: "config-data") pod "05d39c3e-b34f-42ae-ad74-b18e1fd0fced" (UID: "05d39c3e-b34f-42ae-ad74-b18e1fd0fced"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.718308 4903 scope.go:117] "RemoveContainer" containerID="f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31" Nov 26 22:44:55 crc kubenswrapper[4903]: E1126 22:44:55.718760 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31\": container with ID starting with f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31 not found: ID does not exist" containerID="f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.718791 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31"} err="failed to get container status \"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31\": rpc error: code = NotFound desc = could not find container \"f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31\": container with ID starting with f94a9685d95b73faa64569f69fde2e4349a42fdc2b8f86e5268fed3277f30a31 not found: ID does not exist" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.718811 4903 scope.go:117] "RemoveContainer" containerID="a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84" Nov 26 22:44:55 crc kubenswrapper[4903]: E1126 22:44:55.719050 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84\": container with ID starting with a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84 not found: ID does not exist" containerID="a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719071 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84"} err="failed to get container status \"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84\": rpc error: code = NotFound desc = could not find container \"a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84\": container with ID starting with a4d3f1ffa5451c91c503b3bea407b54f1c3e7d887c84556618022ed81b5efc84 not found: ID does not exist" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719088 4903 scope.go:117] "RemoveContainer" containerID="8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba" Nov 26 22:44:55 crc kubenswrapper[4903]: E1126 22:44:55.719427 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba\": container with ID starting with 8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba not found: ID does not exist" containerID="8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719448 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba"} err="failed to get container status \"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba\": rpc error: code = NotFound desc = could not find container \"8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba\": container with ID starting with 8c1bee17465b80b4cb1a11d97be13619f4c069e51c73e13c10c3a1944254b9ba not found: ID does not exist" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719461 4903 scope.go:117] "RemoveContainer" containerID="1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5" Nov 26 22:44:55 crc kubenswrapper[4903]: E1126 22:44:55.719808 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5\": container with ID starting with 1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5 not found: ID does not exist" containerID="1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719835 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5"} err="failed to get container status \"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5\": rpc error: code = NotFound desc = could not find container \"1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5\": container with ID starting with 1bcbf8c97528e31716b7273f2ed140fd10e0e7cb9cb0eeb0ac0cb99627562fd5 not found: ID does not exist" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.719849 4903 scope.go:117] "RemoveContainer" containerID="c2ec86bfe0b6fbe3ecf9c601552850d3a7426bce517945c14006cc027f841e51" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.757679 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05d39c3e-b34f-42ae-ad74-b18e1fd0fced" (UID: "05d39c3e-b34f-42ae-ad74-b18e1fd0fced"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.794543 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.794593 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d39c3e-b34f-42ae-ad74-b18e1fd0fced-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.931336 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.931784 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.932471 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:55 crc kubenswrapper[4903]: I1126 22:44:55.933175 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.068319 4903 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.069240 4903 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.069633 4903 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.069921 4903 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.070197 4903 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection 
refused" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.070229 4903 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.070449 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="200ms" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.273575 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="400ms" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.642721 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.645816 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4"} Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.647070 4903 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.647184 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.647880 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.648821 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:56 crc kubenswrapper[4903]: E1126 22:44:56.674962 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="800ms" Nov 26 22:44:56 crc kubenswrapper[4903]: I1126 22:44:56.974950 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.199929 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.200276 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.216205 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.218676 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.219208 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.219710 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.339009 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access\") pod \"d22f4962-7fe8-4565-92df-3316c71e2079\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") "
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.345187 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir\") pod \"d22f4962-7fe8-4565-92df-3316c71e2079\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") "
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.345279 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d22f4962-7fe8-4565-92df-3316c71e2079" (UID: "d22f4962-7fe8-4565-92df-3316c71e2079"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.345310 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock\") pod \"d22f4962-7fe8-4565-92df-3316c71e2079\" (UID: \"d22f4962-7fe8-4565-92df-3316c71e2079\") "
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.345411 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock" (OuterVolumeSpecName: "var-lock") pod "d22f4962-7fe8-4565-92df-3316c71e2079" (UID: "d22f4962-7fe8-4565-92df-3316c71e2079"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.345973 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d22f4962-7fe8-4565-92df-3316c71e2079" (UID: "d22f4962-7fe8-4565-92df-3316c71e2079"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.349408 4903 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.349461 4903 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d22f4962-7fe8-4565-92df-3316c71e2079-var-lock\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.349486 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d22f4962-7fe8-4565-92df-3316c71e2079-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.468371 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.470485 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.471534 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.472012 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.472573 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.473066 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.475580 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="1.6s"
Nov
26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.552666 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.552863 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.552961 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.553008 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.553133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.553296 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.554285 4903 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.554321 4903 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.554340 4903 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.666129 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.667470 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43" exitCode=0 Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.667570 4903 scope.go:117] "RemoveContainer" containerID="f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.667596 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.671175 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerStarted","Data":"6173fbea37d148de543b8886e044de341f50ca684a84a6ee279b65425039b006"} Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.672078 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.672530 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.673083 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.673558 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 
22:44:57.675532 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.676868 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d22f4962-7fe8-4565-92df-3316c71e2079","Type":"ContainerDied","Data":"fae5c30b01a08b631984a28115d2e1a9d6e35b5b081830ed7151bb47121ac1d2"} Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.676935 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fae5c30b01a08b631984a28115d2e1a9d6e35b5b081830ed7151bb47121ac1d2" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.676927 4903 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.705620 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.706164 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.706648 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.706822 4903 scope.go:117] "RemoveContainer" containerID="d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.707071 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.711954 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.712499 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.713085 4903 status_manager.go:851] "Failed to get status for pod" 
podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.713527 4903 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.744542 4903 scope.go:117] "RemoveContainer" containerID="0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.788949 4903 scope.go:117] "RemoveContainer" containerID="33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.816967 4903 scope.go:117] "RemoveContainer" containerID="9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.851390 4903 scope.go:117] "RemoveContainer" containerID="a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.891531 4903 scope.go:117] "RemoveContainer" containerID="f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.893374 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\": container with ID starting with f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238 not found: ID does not exist" containerID="f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.893422 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238"} err="failed to get container status \"f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\": rpc error: code = NotFound desc = could not find container \"f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238\": container with ID starting with f4a1871ec7f2ffb17d6b2baca36c75e60619019b4a8ce667679f25f1b9c66238 not found: ID does not exist" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.893450 4903 scope.go:117] "RemoveContainer" containerID="d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.894926 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\": container with ID starting with d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43 not found: ID does not exist" containerID="d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.894966 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43"} err="failed to get container status 
\"d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\": rpc error: code = NotFound desc = could not find container \"d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43\": container with ID starting with d50d8492a52cfdef684301686e1eff780743a16773a571321b9d9e3d0ada6c43 not found: ID does not exist" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.894997 4903 scope.go:117] "RemoveContainer" containerID="0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.895429 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\": container with ID starting with 0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d not found: ID does not exist" containerID="0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.895465 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d"} err="failed to get container status \"0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\": rpc error: code = NotFound desc = could not find container \"0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d\": container with ID starting with 0f837c1d060b786e37e06cca49863b642f5d4d7ad1dfc4019a138eba16fbc00d not found: ID does not exist" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.895480 4903 scope.go:117] "RemoveContainer" containerID="33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.898057 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\": container with ID starting with 33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800 not found: ID does not exist" containerID="33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.898114 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800"} err="failed to get container status \"33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\": rpc error: code = NotFound desc = could not find container \"33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800\": container with ID starting with 33e004438d140a5fcb3b85fa1a9b011111453c02b138d47d6f36705d6d5e2800 not found: ID does not exist" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.898144 4903 scope.go:117] "RemoveContainer" containerID="9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.901527 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\": container with ID starting with 9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43 not found: ID does not exist" containerID="9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.901661 4903 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43"} err="failed to get container status \"9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\": rpc error: code = NotFound desc = could not find container \"9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43\": container with ID starting with 9910e37084e07e7f52688e8d1cc262b36896b3e938b3a9375a0059d1ae523f43 not found: ID does not exist" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.901854 4903 scope.go:117] "RemoveContainer" containerID="a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b" Nov 26 22:44:57 crc kubenswrapper[4903]: E1126 22:44:57.902607 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\": container with ID starting with a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b not found: ID does not exist" containerID="a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b" Nov 26 22:44:57 crc kubenswrapper[4903]: I1126 22:44:57.902755 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b"} err="failed to get container status \"a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\": rpc error: code = NotFound desc = could not find container \"a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b\": container with ID starting with a76d8c930c6f57b7c35107c1b8f5e38c4bcce38a56333c9e316601e0ab5d429b not found: ID does not exist" Nov 26 22:44:58 crc kubenswrapper[4903]: I1126 22:44:58.044512 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 22:44:59 crc kubenswrapper[4903]: E1126 22:44:59.076439 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="3.2s" Nov 26 22:44:59 crc kubenswrapper[4903]: I1126 22:44:59.454194 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:44:59 crc kubenswrapper[4903]: I1126 22:44:59.454270 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.472870 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.251:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.472904 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.251:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.720499 4903 generic.go:334] "Generic (PLEG): container finished" podID="23d7d0fe-46b7-41b2-b568-56f18f564748" 
containerID="6173fbea37d148de543b8886e044de341f50ca684a84a6ee279b65425039b006" exitCode=0 Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.720547 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerDied","Data":"6173fbea37d148de543b8886e044de341f50ca684a84a6ee279b65425039b006"} Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.721578 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.724246 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:00 crc kubenswrapper[4903]: I1126 22:45:00.724657 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:01 crc kubenswrapper[4903]: E1126 22:45:01.209787 4903 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.219:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187bafe938eea5b4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,LastTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.758338 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerStarted","Data":"c59a336dd1d693b483d5ed67064dde4971aefe60414b1fb8ebef5380723e10f9"} Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.759890 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.760161 4903 status_manager.go:851] "Failed to get status for 
pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.760397 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.967187 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.981146 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:45:01 crc kubenswrapper[4903]: I1126 22:45:01.981203 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.024302 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.025095 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.025897 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.026550 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.027015 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.038675 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 
22:45:02.039144 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.039527 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.039999 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.200563 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.201149 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 22:45:02 crc kubenswrapper[4903]: E1126 22:45:02.277589 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="6.4s" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.460194 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.828628 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.829543 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.830049 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.830541 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:02 crc kubenswrapper[4903]: I1126 22:45:02.831101 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:03 crc kubenswrapper[4903]: I1126 22:45:03.213846 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ecafb017-7ef9-492e-95d5-d297ec3c9725" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.253:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 22:45:03 crc kubenswrapper[4903]: I1126 22:45:03.213861 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ecafb017-7ef9-492e-95d5-d297ec3c9725" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.253:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 22:45:03 crc kubenswrapper[4903]: I1126 22:45:03.956995 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:45:03 crc kubenswrapper[4903]: I1126 22:45:03.957303 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.030071 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bxpxf" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="registry-server" probeResult="failure" output=< Nov 26 22:45:05 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 22:45:05 crc kubenswrapper[4903]: > Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.822624 4903 generic.go:334] "Generic (PLEG): container finished" podID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" containerID="4db164b02efe5a55e76b3ecf414cfdacfb7e242134eada8ffa7395d466deb281" exitCode=1 Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.822820 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" event={"ID":"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b","Type":"ContainerDied","Data":"4db164b02efe5a55e76b3ecf414cfdacfb7e242134eada8ffa7395d466deb281"} Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.823733 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.824058 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.824185 4903 scope.go:117] "RemoveContainer" containerID="4db164b02efe5a55e76b3ecf414cfdacfb7e242134eada8ffa7395d466deb281" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.824343 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: 
connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.824578 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.824953 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.827281 4903 generic.go:334] "Generic (PLEG): container finished" podID="ced64189-a8c9-4e13-956b-f69139a9602b" containerID="b0c82492969d9ad2b32b84c61b0c3510a7d57217a9f52034e2506cd6c78405d8" exitCode=1 Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.827323 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerDied","Data":"b0c82492969d9ad2b32b84c61b0c3510a7d57217a9f52034e2506cd6c78405d8"} Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.828425 4903 scope.go:117] "RemoveContainer" containerID="b0c82492969d9ad2b32b84c61b0c3510a7d57217a9f52034e2506cd6c78405d8" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.828547 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.829152 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.829644 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.831649 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.832208 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.833148 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.833733 4903 generic.go:334] "Generic (PLEG): container finished" podID="6b930423-80e6-4e2c-825f-7deceec090f5" containerID="fabbb040c796ce55d9636c6dabe59f8527dc2b9e084baa50d784f1be11e0e2e9" exitCode=1 Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.833804 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerDied","Data":"fabbb040c796ce55d9636c6dabe59f8527dc2b9e084baa50d784f1be11e0e2e9"} Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.834383 4903 scope.go:117] "RemoveContainer" containerID="fabbb040c796ce55d9636c6dabe59f8527dc2b9e084baa50d784f1be11e0e2e9" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.834585 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.835250 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.835737 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.835989 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.836214 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.836635 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.837147 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.838325 4903 generic.go:334] "Generic (PLEG): container finished" podID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerID="c77f86cbf5009902eeebfa382a21dc318a865c783305069fa931fe705dbfda82" exitCode=1 Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.838407 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerDied","Data":"c77f86cbf5009902eeebfa382a21dc318a865c783305069fa931fe705dbfda82"} Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.839686 4903 scope.go:117] "RemoveContainer" containerID="c77f86cbf5009902eeebfa382a21dc318a865c783305069fa931fe705dbfda82" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.840926 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.841333 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.842043 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.842342 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.842595 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 
22:45:05.842880 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.843111 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:05 crc kubenswrapper[4903]: I1126 22:45:05.843373 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.569008 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.857925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" event={"ID":"a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b","Type":"ContainerStarted","Data":"4a9ce01e8190614b5817edfd46640fb83e50ea4c618bc40f5640b3aa7efe9204"} Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.859052 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.859967 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.860330 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.861146 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.861637 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused" Nov 26 22:45:06 crc 
kubenswrapper[4903]: I1126 22:45:06.862238 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.862426 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.863903 4903 generic.go:334] "Generic (PLEG): container finished" podID="ced64189-a8c9-4e13-956b-f69139a9602b" containerID="24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de" exitCode=1
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.863887 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.863961 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerDied","Data":"24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de"}
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.864117 4903 scope.go:117] "RemoveContainer" containerID="b0c82492969d9ad2b32b84c61b0c3510a7d57217a9f52034e2506cd6c78405d8"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.864237 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.864761 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865017 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865254 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865460 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865653 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865745 4903 scope.go:117] "RemoveContainer" containerID="24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.865988 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: E1126 22:45:06.866428 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.866575 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.867102 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.867883 4903 generic.go:334] "Generic (PLEG): container finished" podID="6b930423-80e6-4e2c-825f-7deceec090f5" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819" exitCode=1
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.867963 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerDied","Data":"2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819"}
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.868866 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.869014 4903 scope.go:117] "RemoveContainer" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.869261 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: E1126 22:45:06.869519 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.869542 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.870123 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.870444 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.870738 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.870972 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.871102 4903 generic.go:334] "Generic (PLEG): container finished" podID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerID="c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb" exitCode=1
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.871127 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerDied","Data":"c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb"}
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.871156 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.871738 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.872017 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.872202 4903 scope.go:117] "RemoveContainer" containerID="c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.872371 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: E1126 22:45:06.872655 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-57594f7c4c-gdzqb_metallb-system(b5900302-4880-4732-a477-8ed6cf3bfec3)\"" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.872761 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.874607 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.874994 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.875302 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.875614 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:06 crc kubenswrapper[4903]: I1126 22:45:06.969048 4903 scope.go:117] "RemoveContainer" containerID="fabbb040c796ce55d9636c6dabe59f8527dc2b9e084baa50d784f1be11e0e2e9"
Nov 26 22:45:07 crc kubenswrapper[4903]: I1126 22:45:07.051812 4903 scope.go:117] "RemoveContainer" containerID="c77f86cbf5009902eeebfa382a21dc318a865c783305069fa931fe705dbfda82"
Nov 26 22:45:08 crc kubenswrapper[4903]: E1126 22:45:08.679279 4903 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.219:6443: connect: connection refused" interval="7s"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.896642 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.896746 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.897521 4903 scope.go:117] "RemoveContainer" containerID="24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.898104 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: E1126 22:45:08.898191 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.898849 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.899484 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.900126 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.900456 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.900838 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.901191 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.901506 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.922858 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.922921 4903 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49" exitCode=1
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.922955 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49"}
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.923804 4903 scope.go:117] "RemoveContainer" containerID="251bf3bc4833e7a378d5ba18ddd2196aa1241599203039c2c70bb55029a1ba49"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.924279 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.924965 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.925288 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.925535 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.925793 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.926003 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.926242 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.926450 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:08 crc kubenswrapper[4903]: I1126 22:45:08.926881 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: E1126 22:45:09.094434 4903 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/persistence-rabbitmq-cell1-server-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/persistence-rabbitmq-cell1-server-0\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openstack/rabbitmq-cell1-server-0" volumeName="persistence"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.402983 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.403574 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.404154 4903 scope.go:117] "RemoveContainer" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819"
Nov 26 22:45:09 crc kubenswrapper[4903]: E1126 22:45:09.404486 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.404955 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.405439 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.405988 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.406300 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.406641 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.407055 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.407363 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.407680 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.408067 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.460862 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.462924 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.463026 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.463248 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.463597 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.463791 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.464051 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.464481 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.465009 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.465427 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.465743 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.466069 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.466404 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.466845 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.467223 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.467551 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.467912 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.468284 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.468630 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.468970 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.469332 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.469652 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.470036 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.470538 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.470983 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.471318 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.471620 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.472004 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.472326 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.472637 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.472986 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.473319 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.473652 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.473975 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.950686 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.950839 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a8d0e06d9b01e61f010f5e00709fbe186f5c30cd56ab334ad06dfdf90114c3aa"}
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.953473 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.955169 4903 scope.go:117] "RemoveContainer" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.955964 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.956227 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.956479 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.956776 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.957596 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.957919 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: E1126 22:45:09.958053 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.958178 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.958512 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.959288 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.959972 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.963657 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.965794 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.966410 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.967101 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.967377 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.967661 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.967957 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.968197 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.968445 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.968798 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:09 crc kubenswrapper[4903]: I1126 22:45:09.969093 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.029457 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.038966 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.039567 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.043653 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.044216 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.044817 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.053384 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.053613 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.053857 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.054066 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.054282 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.068839 4903 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.068900 4903 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293"
Nov 26 22:45:10 crc kubenswrapper[4903]: E1126 22:45:10.069500 4903 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.070362 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.972317 4903 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="7a33772fffdde12712f990fca18a517c221df0471f7cc3bb8dc799be6e523bc6" exitCode=0
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.972460 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"7a33772fffdde12712f990fca18a517c221df0471f7cc3bb8dc799be6e523bc6"}
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.973144 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8c0411eb7c5b45141e760c3c8fb83c84bad59b965a269d3bd5631bda55eafa94"}
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.974861 4903 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.974900 4903 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293"
Nov 26 22:45:10 crc kubenswrapper[4903]: E1126 22:45:10.975526 4903 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.976047 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.976790 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.977760 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.978471 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.979170 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.980044 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.980688 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.981364 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.982017 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:10 crc kubenswrapper[4903]: I1126 22:45:10.982572 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: E1126 22:45:11.055006 4903 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/glance-glance-default-external-api-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/glance-glance-default-external-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused" pod="openstack/glance-default-external-api-0" volumeName="glance"
Nov 26 22:45:11 crc kubenswrapper[4903]: E1126 22:45:11.211062 4903 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.219:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187bafe938eea5b4 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,LastTimestamp:2025-11-26 22:44:55.263086004 +0000 UTC m=+1423.953320904,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.492869 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.494209 4903 scope.go:117] "RemoveContainer" containerID="c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb"
Nov 26 22:45:11 crc kubenswrapper[4903]: E1126 22:45:11.494588 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-57594f7c4c-gdzqb_metallb-system(b5900302-4880-4732-a477-8ed6cf3bfec3)\"" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.494899 4903 status_manager.go:851] "Failed to get status for pod" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-57594f7c4c-gdzqb\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.495323 4903 status_manager.go:851] "Failed to get status for pod" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" pod="openstack/aodh-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/aodh-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.495594 4903 status_manager.go:851] "Failed to get status for pod" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-bm7r7\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.495931 4903 status_manager.go:851] "Failed to get status for pod" podUID="d23a575d-55d9-4805-bfee-09f92b0b97ef" pod="openstack/nova-api-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-api-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.496213 4903 status_manager.go:851] "Failed to get status for pod" podUID="646d5a7a-f188-4dc2-99ac-24c16bcf59fc" pod="openstack/nova-scheduler-0" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/nova-scheduler-0\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.496485 4903 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.496786 4903 status_manager.go:851] "Failed to get status for pod" podUID="a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-5c85bfb685-pwxll\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.497079 4903 status_manager.go:851] "Failed to get status for pod" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.497346 4903 status_manager.go:851] "Failed to get status for pod" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" pod="openshift-marketplace/redhat-operators-bxpxf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-bxpxf\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.497637 4903 status_manager.go:851] "Failed to get status for pod" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-nz8x4\": dial tcp 38.102.83.219:6443: connect: connection refused"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.760943 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777235 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777553 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777573 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777600 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w8c5\" (UniqueName: \"kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777626 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777675 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777726 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle\") pod \"6253c12a-e41a-4476-abda-3b3b7ff084b3\" (UID: \"6253c12a-e41a-4476-abda-3b3b7ff084b3\") "
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.777789 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.778071 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "run-httpd".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.778143 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.782549 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5" (OuterVolumeSpecName: "kube-api-access-9w8c5") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "kube-api-access-9w8c5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.797136 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts" (OuterVolumeSpecName: "scripts") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.846014 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.880237 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6253c12a-e41a-4476-abda-3b3b7ff084b3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.880265 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w8c5\" (UniqueName: \"kubernetes.io/projected/6253c12a-e41a-4476-abda-3b3b7ff084b3-kube-api-access-9w8c5\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.880274 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.880282 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.892174 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.923210 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data" (OuterVolumeSpecName: "config-data") pod "6253c12a-e41a-4476-abda-3b3b7ff084b3" (UID: "6253c12a-e41a-4476-abda-3b3b7ff084b3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.987943 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:11 crc kubenswrapper[4903]: I1126 22:45:11.987992 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6253c12a-e41a-4476-abda-3b3b7ff084b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.016330 4903 generic.go:334] "Generic (PLEG): container finished" podID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerID="69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0" exitCode=137 Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.016845 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerDied","Data":"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0"} Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.016914 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6253c12a-e41a-4476-abda-3b3b7ff084b3","Type":"ContainerDied","Data":"3e9e9e281aeb9e63b16c87ae630152545d40d052a08eb2df12e965bbb2c6776f"} Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.016933 4903 scope.go:117] "RemoveContainer" containerID="69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.017039 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.021313 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e6df85b3aed75e06f1671871038b92ea2c8ddc5a28056b292518805cd9aa25b9"} Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.021340 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a6fe2ffe91dd71a816b6a5bcb03a0b75c2983c87e67292b799c670336583c25e"} Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.066785 4903 scope.go:117] "RemoveContainer" containerID="94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.103994 4903 scope.go:117] "RemoveContainer" containerID="6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.133537 4903 scope.go:117] "RemoveContainer" containerID="56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.163154 4903 scope.go:117] "RemoveContainer" containerID="69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0" Nov 26 22:45:12 crc kubenswrapper[4903]: E1126 22:45:12.163597 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0\": container with ID starting with 69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0 not found: ID does not exist" 
containerID="69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.163641 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0"} err="failed to get container status \"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0\": rpc error: code = NotFound desc = could not find container \"69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0\": container with ID starting with 69e1f212900b26fe09799a7cd49a2e680275e843290cdf02bb9099d5635b7af0 not found: ID does not exist" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.163672 4903 scope.go:117] "RemoveContainer" containerID="94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d" Nov 26 22:45:12 crc kubenswrapper[4903]: E1126 22:45:12.165196 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d\": container with ID starting with 94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d not found: ID does not exist" containerID="94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.165247 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d"} err="failed to get container status \"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d\": rpc error: code = NotFound desc = could not find container \"94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d\": container with ID starting with 94f3cd250463eb6fe09bd7e75991098991256d86a369bb54a97bf8a13f8b3e8d not found: ID does not exist" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.165279 4903 scope.go:117] "RemoveContainer" containerID="6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7" Nov 26 22:45:12 crc kubenswrapper[4903]: E1126 22:45:12.165542 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7\": container with ID starting with 6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7 not found: ID does not exist" containerID="6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.165565 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7"} err="failed to get container status \"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7\": rpc error: code = NotFound desc = could not find container \"6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7\": container with ID starting with 6c029c5b88d3c27e6a18dbc9144642e77c354036f5c8b70d3e13b82f6b6ddda7 not found: ID does not exist" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.165577 4903 scope.go:117] "RemoveContainer" containerID="56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32" Nov 26 22:45:12 crc kubenswrapper[4903]: E1126 22:45:12.166092 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32\": container with ID starting with 56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32 not found: ID does not exist" containerID="56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.166120 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32"} err="failed to get container status \"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32\": rpc error: code = NotFound desc = could not find container \"56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32\": container with ID starting with 56d7355f2c8cedebe93c70435d337d4610bfddc024f6d1bacba4aef29dc5bf32 not found: ID does not exist" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.214534 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.216063 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.220393 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.463454 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:45:12 crc kubenswrapper[4903]: I1126 22:45:12.540009 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.035814 4903 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.036079 4903 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.036205 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e3adf61ce016ad29b1c68d72f376d5772b0de1fe0cc6c55028f0dc04f5e2674c"} Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.036224 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"509ca6cd6f1207312c9e638dcca48826b5e75338d1efb4fe209e8da6bbb845b8"} Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.036234 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4e75f1b7184f9681744ba0eb35dcf3e7503fcf02b72f9973fb28dc2eaabe6d73"} Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.036258 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:13 crc kubenswrapper[4903]: I1126 22:45:13.057893 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-metadata-0" Nov 26 22:45:14 crc kubenswrapper[4903]: I1126 22:45:14.014301 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:45:14 crc kubenswrapper[4903]: I1126 22:45:14.082716 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:45:14 crc kubenswrapper[4903]: I1126 22:45:14.787676 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-5c85bfb685-pwxll" Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.071303 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.071360 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.100014 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" containerID="96c82dc1c33a0092972e9f05f6fbe9199dc6b53a530f909a2e37f78460fe4b7e" exitCode=1 Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.100126 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerDied","Data":"96c82dc1c33a0092972e9f05f6fbe9199dc6b53a530f909a2e37f78460fe4b7e"} Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.100960 4903 scope.go:117] "RemoveContainer" containerID="96c82dc1c33a0092972e9f05f6fbe9199dc6b53a530f909a2e37f78460fe4b7e" Nov 26 22:45:15 crc kubenswrapper[4903]: I1126 22:45:15.109988 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.111920 4903 generic.go:334] "Generic (PLEG): container finished" podID="edfb7faf-e9af-4ee8-85cd-a11af5812946" containerID="4cd2963ba045d645dcb7110139a8ae24a4d2fd2140d7dd107404d60e2e8b8984" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.112020 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerDied","Data":"4cd2963ba045d645dcb7110139a8ae24a4d2fd2140d7dd107404d60e2e8b8984"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.114362 4903 scope.go:117] "RemoveContainer" containerID="4cd2963ba045d645dcb7110139a8ae24a4d2fd2140d7dd107404d60e2e8b8984" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.115178 4903 generic.go:334] "Generic (PLEG): container finished" podID="0c7b8e09-c502-425e-ac59-b2befd1132fa" containerID="0e45631332036649004a30569555be5e4bd132014e104ec35168613bbe348c66" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.115245 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerDied","Data":"0e45631332036649004a30569555be5e4bd132014e104ec35168613bbe348c66"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.115667 4903 scope.go:117] "RemoveContainer" containerID="0e45631332036649004a30569555be5e4bd132014e104ec35168613bbe348c66" Nov 26 22:45:16 
crc kubenswrapper[4903]: I1126 22:45:16.117537 4903 generic.go:334] "Generic (PLEG): container finished" podID="3e621847-5f60-491a-8e5c-f2fb10df1726" containerID="2e7dfc9aa7196f1c2148e674ded6c2df43849959ea97e30165028a9eafef25a7" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.117594 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerDied","Data":"2e7dfc9aa7196f1c2148e674ded6c2df43849959ea97e30165028a9eafef25a7"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.117962 4903 scope.go:117] "RemoveContainer" containerID="2e7dfc9aa7196f1c2148e674ded6c2df43849959ea97e30165028a9eafef25a7" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.124013 4903 generic.go:334] "Generic (PLEG): container finished" podID="d9a3465f-cd49-4af9-a908-58aec0273dbe" containerID="78b29c82490aeae36542576b3c953a3bd4910d210c60ee88b03fe025f8a92307" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.124090 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerDied","Data":"78b29c82490aeae36542576b3c953a3bd4910d210c60ee88b03fe025f8a92307"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.125471 4903 scope.go:117] "RemoveContainer" containerID="78b29c82490aeae36542576b3c953a3bd4910d210c60ee88b03fe025f8a92307" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.126107 4903 generic.go:334] "Generic (PLEG): container finished" podID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" containerID="b2b2d84dd240cab1a6be339205b067a6324f0903f14464e14364fb9e5a831c01" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.126190 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerDied","Data":"b2b2d84dd240cab1a6be339205b067a6324f0903f14464e14364fb9e5a831c01"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.126929 4903 scope.go:117] "RemoveContainer" containerID="b2b2d84dd240cab1a6be339205b067a6324f0903f14464e14364fb9e5a831c01" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.128542 4903 generic.go:334] "Generic (PLEG): container finished" podID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" containerID="a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.128608 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerDied","Data":"a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.129201 4903 scope.go:117] "RemoveContainer" containerID="a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.134336 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" containerID="f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897" exitCode=1 Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.134382 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerDied","Data":"f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897"} Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.134417 4903 scope.go:117] "RemoveContainer" containerID="96c82dc1c33a0092972e9f05f6fbe9199dc6b53a530f909a2e37f78460fe4b7e" Nov 26 22:45:16 crc kubenswrapper[4903]: I1126 22:45:16.135262 4903 scope.go:117] "RemoveContainer" containerID="f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897" Nov 26 22:45:16 crc kubenswrapper[4903]: E1126 22:45:16.135748 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5kmlf_openstack-operators(fcacd7dc-2b08-46d7-98c2-09cf6b6d690b)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podUID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.008942 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.012512 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.147968 4903 generic.go:334] "Generic (PLEG): container finished" podID="edfb7faf-e9af-4ee8-85cd-a11af5812946" containerID="a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65" exitCode=1 Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.148037 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerDied","Data":"a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.148074 4903 scope.go:117] "RemoveContainer" containerID="4cd2963ba045d645dcb7110139a8ae24a4d2fd2140d7dd107404d60e2e8b8984" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.148909 4903 scope.go:117] "RemoveContainer" containerID="a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65" Nov 26 22:45:17 crc kubenswrapper[4903]: E1126 22:45:17.149242 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-vj562_openstack-operators(edfb7faf-e9af-4ee8-85cd-a11af5812946)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podUID="edfb7faf-e9af-4ee8-85cd-a11af5812946" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.150726 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerStarted","Data":"ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.150904 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.153185 4903 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerStarted","Data":"11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.153381 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.155264 4903 generic.go:334] "Generic (PLEG): container finished" podID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" containerID="7ec228a4a74f23af90bbdfabddbd0256ae9bf23a9046b007711242d44d17e32b" exitCode=1 Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.155310 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerDied","Data":"7ec228a4a74f23af90bbdfabddbd0256ae9bf23a9046b007711242d44d17e32b"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.155615 4903 scope.go:117] "RemoveContainer" containerID="7ec228a4a74f23af90bbdfabddbd0256ae9bf23a9046b007711242d44d17e32b" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.160805 4903 generic.go:334] "Generic (PLEG): container finished" podID="710215b7-5e67-47d8-833f-b8db638cac56" containerID="591fc07ae4101cbae3b0de0f9156300eada80b98d575b5d0d91fefc032dc50cb" exitCode=1 Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.160854 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerDied","Data":"591fc07ae4101cbae3b0de0f9156300eada80b98d575b5d0d91fefc032dc50cb"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.161226 4903 scope.go:117] "RemoveContainer" containerID="591fc07ae4101cbae3b0de0f9156300eada80b98d575b5d0d91fefc032dc50cb" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.166359 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerStarted","Data":"805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.166559 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.169475 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerStarted","Data":"19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.169730 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.172809 4903 generic.go:334] "Generic (PLEG): container finished" podID="736b757c-8584-4b59-81d6-ffdd8bbac62c" containerID="46eafe262e8750d44bb32c6594ce8ba245e7fb89e08540b2174623a5fd00f013" exitCode=1 Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.172873 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerDied","Data":"46eafe262e8750d44bb32c6594ce8ba245e7fb89e08540b2174623a5fd00f013"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.173455 4903 scope.go:117] "RemoveContainer" containerID="46eafe262e8750d44bb32c6594ce8ba245e7fb89e08540b2174623a5fd00f013" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.178207 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerStarted","Data":"5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab"} Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.179472 4903 status_manager.go:317] "Container readiness changed for unknown container" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" containerID="cri-o://a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968" Nov 26 22:45:17 crc kubenswrapper[4903]: I1126 22:45:17.179502 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.070663 4903 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.193369 4903 generic.go:334] "Generic (PLEG): container finished" podID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" containerID="cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.193472 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerDied","Data":"cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.194330 4903 scope.go:117] "RemoveContainer" containerID="7ec228a4a74f23af90bbdfabddbd0256ae9bf23a9046b007711242d44d17e32b" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.195438 4903 scope.go:117] "RemoveContainer" containerID="cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.195903 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.197222 4903 generic.go:334] "Generic (PLEG): container finished" podID="0c7b8e09-c502-425e-ac59-b2befd1132fa" containerID="ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.197297 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerDied","Data":"ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 
22:45:18.198162 4903 scope.go:117] "RemoveContainer" containerID="ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.198605 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jn49q_openstack-operators(0c7b8e09-c502-425e-ac59-b2befd1132fa)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podUID="0c7b8e09-c502-425e-ac59-b2befd1132fa" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.200226 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3d89c00-9723-43a3-a1d2-866787257900" containerID="8beb6b6cf96c4e36ef6da37a31eef6c5e78e2d0b7f34dbf7a4ff45693dd1b468" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.200287 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerDied","Data":"8beb6b6cf96c4e36ef6da37a31eef6c5e78e2d0b7f34dbf7a4ff45693dd1b468"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.201403 4903 scope.go:117] "RemoveContainer" containerID="8beb6b6cf96c4e36ef6da37a31eef6c5e78e2d0b7f34dbf7a4ff45693dd1b468" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.203828 4903 generic.go:334] "Generic (PLEG): container finished" podID="3e621847-5f60-491a-8e5c-f2fb10df1726" containerID="11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.203877 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerDied","Data":"11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.205099 4903 scope.go:117] "RemoveContainer" containerID="11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.205788 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-6hzbx_openstack-operators(3e621847-5f60-491a-8e5c-f2fb10df1726)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podUID="3e621847-5f60-491a-8e5c-f2fb10df1726" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.206839 4903 generic.go:334] "Generic (PLEG): container finished" podID="63feada5-3911-469e-a0b1-539b7aa2948d" containerID="822d7862abecb0b1fc1838b71d4c28f8aaeb12e03f49f915cbe9e3f1c99e470e" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.206981 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerDied","Data":"822d7862abecb0b1fc1838b71d4c28f8aaeb12e03f49f915cbe9e3f1c99e470e"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.207581 4903 scope.go:117] "RemoveContainer" containerID="822d7862abecb0b1fc1838b71d4c28f8aaeb12e03f49f915cbe9e3f1c99e470e" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.226763 4903 generic.go:334] "Generic (PLEG): container 
finished" podID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.226913 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerDied","Data":"5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.227596 4903 scope.go:117] "RemoveContainer" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.228058 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.238241 4903 generic.go:334] "Generic (PLEG): container finished" podID="736b757c-8584-4b59-81d6-ffdd8bbac62c" containerID="e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.238298 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerDied","Data":"e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.238838 4903 scope.go:117] "RemoveContainer" containerID="e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.239221 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.241646 4903 generic.go:334] "Generic (PLEG): container finished" podID="710215b7-5e67-47d8-833f-b8db638cac56" containerID="ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.241729 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerDied","Data":"ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.242558 4903 scope.go:117] "RemoveContainer" containerID="ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.242896 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56)\"" 
pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.254969 4903 generic.go:334] "Generic (PLEG): container finished" podID="e0c12217-0537-436e-b0d9-5e5049888268" containerID="5f2f48bbdbfbd4e10f2d62fb208a61cababa73e65d09327a3127865af38185d9" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.255059 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerDied","Data":"5f2f48bbdbfbd4e10f2d62fb208a61cababa73e65d09327a3127865af38185d9"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.256013 4903 scope.go:117] "RemoveContainer" containerID="5f2f48bbdbfbd4e10f2d62fb208a61cababa73e65d09327a3127865af38185d9" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.261985 4903 generic.go:334] "Generic (PLEG): container finished" podID="d9a3465f-cd49-4af9-a908-58aec0273dbe" containerID="805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.262069 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerDied","Data":"805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.263029 4903 scope.go:117] "RemoveContainer" containerID="805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.263475 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-n7krq_openstack-operators(d9a3465f-cd49-4af9-a908-58aec0273dbe)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podUID="d9a3465f-cd49-4af9-a908-58aec0273dbe" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.264250 4903 generic.go:334] "Generic (PLEG): container finished" podID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" containerID="19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b" exitCode=1 Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.264301 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerDied","Data":"19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b"} Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.265092 4903 scope.go:117] "RemoveContainer" containerID="19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b" Nov 26 22:45:18 crc kubenswrapper[4903]: E1126 22:45:18.265421 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-vjt6h_openstack-operators(83927c87-ccd7-4b29-97b1-8d03ce0d1b1e)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podUID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.274996 4903 scope.go:117] "RemoveContainer" 
containerID="0e45631332036649004a30569555be5e4bd132014e104ec35168613bbe348c66" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.478188 4903 scope.go:117] "RemoveContainer" containerID="2e7dfc9aa7196f1c2148e674ded6c2df43849959ea97e30165028a9eafef25a7" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.597646 4903 scope.go:117] "RemoveContainer" containerID="a0d3a7edb607adba1eba960a30bd237f65ef6b3dfe4ef3e7678e71c811f6c968" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.712611 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.729772 4903 scope.go:117] "RemoveContainer" containerID="46eafe262e8750d44bb32c6594ce8ba245e7fb89e08540b2174623a5fd00f013" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.730878 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.766892 4903 scope.go:117] "RemoveContainer" containerID="591fc07ae4101cbae3b0de0f9156300eada80b98d575b5d0d91fefc032dc50cb" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.795761 4903 scope.go:117] "RemoveContainer" containerID="78b29c82490aeae36542576b3c953a3bd4910d210c60ee88b03fe025f8a92307" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.808677 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.821418 4903 scope.go:117] "RemoveContainer" containerID="b2b2d84dd240cab1a6be339205b067a6324f0903f14464e14364fb9e5a831c01" Nov 26 22:45:18 crc kubenswrapper[4903]: I1126 22:45:18.827136 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.023298 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.050076 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.052612 4903 scope.go:117] "RemoveContainer" containerID="f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.053123 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5kmlf_openstack-operators(fcacd7dc-2b08-46d7-98c2-09cf6b6d690b)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podUID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.076579 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.077412 4903 scope.go:117] "RemoveContainer" containerID="a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.077672 4903 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-vj562_openstack-operators(edfb7faf-e9af-4ee8-85cd-a11af5812946)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podUID="edfb7faf-e9af-4ee8-85cd-a11af5812946" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.092912 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.279289 4903 scope.go:117] "RemoveContainer" containerID="ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.279894 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.280580 4903 generic.go:334] "Generic (PLEG): container finished" podID="63feada5-3911-469e-a0b1-539b7aa2948d" containerID="a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.280657 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerDied","Data":"a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.280738 4903 scope.go:117] "RemoveContainer" containerID="822d7862abecb0b1fc1838b71d4c28f8aaeb12e03f49f915cbe9e3f1c99e470e" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.281412 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.120:8081/readyz\": dial tcp 10.217.0.120:8081: connect: connection refused" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.281565 4903 scope.go:117] "RemoveContainer" containerID="a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.281963 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-rtztw_openstack-operators(63feada5-3911-469e-a0b1-539b7aa2948d)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" podUID="63feada5-3911-469e-a0b1-539b7aa2948d" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.283967 4903 scope.go:117] "RemoveContainer" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.284222 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.286119 4903 scope.go:117] "RemoveContainer" containerID="cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.286331 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.295687 4903 scope.go:117] "RemoveContainer" containerID="ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.295937 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jn49q_openstack-operators(0c7b8e09-c502-425e-ac59-b2befd1132fa)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podUID="0c7b8e09-c502-425e-ac59-b2befd1132fa" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.301518 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3d89c00-9723-43a3-a1d2-866787257900" containerID="0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.301590 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerDied","Data":"0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.302464 4903 scope.go:117] "RemoveContainer" containerID="0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.302869 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.310260 4903 scope.go:117] "RemoveContainer" containerID="11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.310579 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-6hzbx_openstack-operators(3e621847-5f60-491a-8e5c-f2fb10df1726)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podUID="3e621847-5f60-491a-8e5c-f2fb10df1726" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 
22:45:19.312969 4903 scope.go:117] "RemoveContainer" containerID="805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.313210 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-n7krq_openstack-operators(d9a3465f-cd49-4af9-a908-58aec0273dbe)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podUID="d9a3465f-cd49-4af9-a908-58aec0273dbe" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.314644 4903 generic.go:334] "Generic (PLEG): container finished" podID="1a890e26-66fb-47d6-85dc-ae6b9045e4c6" containerID="ddcc6f06f473530bca47bc249215ac04819ba47c6da30c7a7308f6cbfd86e75f" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.314685 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" event={"ID":"1a890e26-66fb-47d6-85dc-ae6b9045e4c6","Type":"ContainerDied","Data":"ddcc6f06f473530bca47bc249215ac04819ba47c6da30c7a7308f6cbfd86e75f"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.315004 4903 scope.go:117] "RemoveContainer" containerID="ddcc6f06f473530bca47bc249215ac04819ba47c6da30c7a7308f6cbfd86e75f" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.316524 4903 generic.go:334] "Generic (PLEG): container finished" podID="f8815d8e-4b34-47b3-98fa-8370205381e0" containerID="288bd314ce3deb5913cf3f356646030d190fc91c10dc8940d7e10b3f64644580" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.316606 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerDied","Data":"288bd314ce3deb5913cf3f356646030d190fc91c10dc8940d7e10b3f64644580"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.317386 4903 scope.go:117] "RemoveContainer" containerID="288bd314ce3deb5913cf3f356646030d190fc91c10dc8940d7e10b3f64644580" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.321333 4903 generic.go:334] "Generic (PLEG): container finished" podID="e0c12217-0537-436e-b0d9-5e5049888268" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.321365 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerDied","Data":"b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.321836 4903 scope.go:117] "RemoveContainer" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.322081 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.326636 4903 scope.go:117] "RemoveContainer" 
containerID="19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.326890 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-vjt6h_openstack-operators(83927c87-ccd7-4b29-97b1-8d03ce0d1b1e)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podUID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.330513 4903 generic.go:334] "Generic (PLEG): container finished" podID="32ccd880-8dfa-46d1-b262-5d10422527ec" containerID="3c94ced101570c9af35cd73d7d22b0b61b6d34a120764fc28d341cb1830af1de" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.330664 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerDied","Data":"3c94ced101570c9af35cd73d7d22b0b61b6d34a120764fc28d341cb1830af1de"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.331583 4903 scope.go:117] "RemoveContainer" containerID="3c94ced101570c9af35cd73d7d22b0b61b6d34a120764fc28d341cb1830af1de" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.335013 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" containerID="7ec669adb1f6ac9e1a2ccfabf07f67a13401b092148b962e90f2cb9c3a9dd9ee" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.335115 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerDied","Data":"7ec669adb1f6ac9e1a2ccfabf07f67a13401b092148b962e90f2cb9c3a9dd9ee"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.335663 4903 scope.go:117] "RemoveContainer" containerID="7ec669adb1f6ac9e1a2ccfabf07f67a13401b092148b962e90f2cb9c3a9dd9ee" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.338184 4903 generic.go:334] "Generic (PLEG): container finished" podID="9239ccfa-cbaa-44b2-a70f-94a281d885f6" containerID="2df00e35d27059d34ff1d2f494351a367b0de5e3e774f99a24d52aa17250d1cd" exitCode=1 Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.338684 4903 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.338718 4903 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.338762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" event={"ID":"9239ccfa-cbaa-44b2-a70f-94a281d885f6","Type":"ContainerDied","Data":"2df00e35d27059d34ff1d2f494351a367b0de5e3e774f99a24d52aa17250d1cd"} Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.339658 4903 scope.go:117] "RemoveContainer" containerID="2df00e35d27059d34ff1d2f494351a367b0de5e3e774f99a24d52aa17250d1cd" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.347125 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:45:19 crc 
kubenswrapper[4903]: I1126 22:45:19.392252 4903 scope.go:117] "RemoveContainer" containerID="8beb6b6cf96c4e36ef6da37a31eef6c5e78e2d0b7f34dbf7a4ff45693dd1b468" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.540549 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.541300 4903 scope.go:117] "RemoveContainer" containerID="e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193" Nov 26 22:45:19 crc kubenswrapper[4903]: E1126 22:45:19.541564 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.604403 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.609677 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:45:19 crc kubenswrapper[4903]: I1126 22:45:19.622740 4903 scope.go:117] "RemoveContainer" containerID="5f2f48bbdbfbd4e10f2d62fb208a61cababa73e65d09327a3127865af38185d9" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.243191 4903 request.go:700] Waited for 1.007464013s, retries: 1, retry-after: 5s - retry-reason: 503 - request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/secrets?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dredhat-marketplace-dockercfg-x2ctb&resourceVersion=51095&timeout=45m40s&timeoutSeconds=2740&watch=true Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.356565 4903 scope.go:117] "RemoveContainer" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.356891 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.362369 4903 scope.go:117] "RemoveContainer" containerID="0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.362753 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.368227 4903 scope.go:117] "RemoveContainer" containerID="a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8" Nov 26 22:45:20 
crc kubenswrapper[4903]: E1126 22:45:20.369007 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-rtztw_openstack-operators(63feada5-3911-469e-a0b1-539b7aa2948d)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" podUID="63feada5-3911-469e-a0b1-539b7aa2948d" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.370178 4903 generic.go:334] "Generic (PLEG): container finished" podID="d4e9967e-dcf0-42c1-94fc-fea289ed54c2" containerID="03b8de7e39cebb6aea4e1ecbf20184a5bdd9a63361f3dd0575cb70d140321eda" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.370213 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" event={"ID":"d4e9967e-dcf0-42c1-94fc-fea289ed54c2","Type":"ContainerDied","Data":"03b8de7e39cebb6aea4e1ecbf20184a5bdd9a63361f3dd0575cb70d140321eda"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.371180 4903 scope.go:117] "RemoveContainer" containerID="03b8de7e39cebb6aea4e1ecbf20184a5bdd9a63361f3dd0575cb70d140321eda" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.373637 4903 generic.go:334] "Generic (PLEG): container finished" podID="32ccd880-8dfa-46d1-b262-5d10422527ec" containerID="5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.373669 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerDied","Data":"5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.373719 4903 scope.go:117] "RemoveContainer" containerID="3c94ced101570c9af35cd73d7d22b0b61b6d34a120764fc28d341cb1830af1de" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.374143 4903 scope.go:117] "RemoveContainer" containerID="5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.374382 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_openstack-operators(32ccd880-8dfa-46d1-b262-5d10422527ec)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.385412 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" containerID="0e691f467ec275fad3c08ba8876e621a9042e2b03a4a3a1bee4b2ecaaada8121" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.385508 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerDied","Data":"0e691f467ec275fad3c08ba8876e621a9042e2b03a4a3a1bee4b2ecaaada8121"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.388144 4903 scope.go:117] "RemoveContainer" containerID="0e691f467ec275fad3c08ba8876e621a9042e2b03a4a3a1bee4b2ecaaada8121" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 
22:45:20.388623 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-6986c4df8b-bkqnw_openstack-operators(3f2ebc07-fbfc-4bd6-9622-63b820e47247)\"" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.394446 4903 generic.go:334] "Generic (PLEG): container finished" podID="b34e8bed-559a-49d6-b870-c375f36be49f" containerID="8f387160167c450d6022dd0915096e662c6cef237217b5aaaf59921b82ba050d" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.394502 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerDied","Data":"8f387160167c450d6022dd0915096e662c6cef237217b5aaaf59921b82ba050d"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.398120 4903 scope.go:117] "RemoveContainer" containerID="8f387160167c450d6022dd0915096e662c6cef237217b5aaaf59921b82ba050d" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.398523 4903 generic.go:334] "Generic (PLEG): container finished" podID="651c7100-bdd0-41e2-8a7f-eaab13dfd391" containerID="804a40a770771087be097ba58b157e480b884d71a51ea79beece38c9cae8b2a9" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.398597 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" event={"ID":"651c7100-bdd0-41e2-8a7f-eaab13dfd391","Type":"ContainerDied","Data":"804a40a770771087be097ba58b157e480b884d71a51ea79beece38c9cae8b2a9"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.399338 4903 scope.go:117] "RemoveContainer" containerID="804a40a770771087be097ba58b157e480b884d71a51ea79beece38c9cae8b2a9" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.402411 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" event={"ID":"1a890e26-66fb-47d6-85dc-ae6b9045e4c6","Type":"ContainerStarted","Data":"e89c46c7bf8672065676e484481394f8d3b69e4dc303afe9e3615272fea478c0"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.402747 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.405556 4903 generic.go:334] "Generic (PLEG): container finished" podID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" containerID="a65314aa19c0b365aa18ec5534e98902b4078919be888afef02c8df601451f5e" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.405588 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" event={"ID":"8248a160-f606-4eaa-9bc1-0e7fcc1ab852","Type":"ContainerDied","Data":"a65314aa19c0b365aa18ec5534e98902b4078919be888afef02c8df601451f5e"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.406265 4903 scope.go:117] "RemoveContainer" containerID="a65314aa19c0b365aa18ec5534e98902b4078919be888afef02c8df601451f5e" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.410084 4903 generic.go:334] "Generic (PLEG): container finished" podID="f8815d8e-4b34-47b3-98fa-8370205381e0" 
containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.410165 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerDied","Data":"76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.411222 4903 scope.go:117] "RemoveContainer" containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.411711 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.412958 4903 generic.go:334] "Generic (PLEG): container finished" podID="9239ccfa-cbaa-44b2-a70f-94a281d885f6" containerID="9f2013d79e3bdcf5cf56513446bc03890a04a07a43e644047512ea4d009463b2" exitCode=1 Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.412985 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" event={"ID":"9239ccfa-cbaa-44b2-a70f-94a281d885f6","Type":"ContainerDied","Data":"9f2013d79e3bdcf5cf56513446bc03890a04a07a43e644047512ea4d009463b2"} Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.413336 4903 scope.go:117] "RemoveContainer" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.413553 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.413614 4903 scope.go:117] "RemoveContainer" containerID="9f2013d79e3bdcf5cf56513446bc03890a04a07a43e644047512ea4d009463b2" Nov 26 22:45:20 crc kubenswrapper[4903]: E1126 22:45:20.414043 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-5467d974c6-lpj77_openstack-operators(9239ccfa-cbaa-44b2-a70f-94a281d885f6)\"" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" podUID="9239ccfa-cbaa-44b2-a70f-94a281d885f6" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.606637 4903 scope.go:117] "RemoveContainer" containerID="7ec669adb1f6ac9e1a2ccfabf07f67a13401b092148b962e90f2cb9c3a9dd9ee" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.666042 4903 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6634d493-9065-414a-b7e7-3f99b8450625" Nov 26 22:45:20 crc kubenswrapper[4903]: 
I1126 22:45:20.733225 4903 scope.go:117] "RemoveContainer" containerID="288bd314ce3deb5913cf3f356646030d190fc91c10dc8940d7e10b3f64644580" Nov 26 22:45:20 crc kubenswrapper[4903]: I1126 22:45:20.778065 4903 scope.go:117] "RemoveContainer" containerID="2df00e35d27059d34ff1d2f494351a367b0de5e3e774f99a24d52aa17250d1cd" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.429137 4903 generic.go:334] "Generic (PLEG): container finished" podID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" containerID="b61f1738921cbe64573b58376e2be8fc44d76ed21e99391ab498002f537ec2d0" exitCode=1 Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.429207 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" event={"ID":"8248a160-f606-4eaa-9bc1-0e7fcc1ab852","Type":"ContainerDied","Data":"b61f1738921cbe64573b58376e2be8fc44d76ed21e99391ab498002f537ec2d0"} Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.429259 4903 scope.go:117] "RemoveContainer" containerID="a65314aa19c0b365aa18ec5534e98902b4078919be888afef02c8df601451f5e" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.429884 4903 scope.go:117] "RemoveContainer" containerID="b61f1738921cbe64573b58376e2be8fc44d76ed21e99391ab498002f537ec2d0" Nov 26 22:45:21 crc kubenswrapper[4903]: E1126 22:45:21.430302 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-fzd8p_openstack-operators(8248a160-f606-4eaa-9bc1-0e7fcc1ab852)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" podUID="8248a160-f606-4eaa-9bc1-0e7fcc1ab852" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.435657 4903 generic.go:334] "Generic (PLEG): container finished" podID="b34e8bed-559a-49d6-b870-c375f36be49f" containerID="e147c460e04625d298597155031751a7e09945e545d5d0de83e8535709d852ae" exitCode=1 Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.435749 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerDied","Data":"e147c460e04625d298597155031751a7e09945e545d5d0de83e8535709d852ae"} Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.437333 4903 scope.go:117] "RemoveContainer" containerID="e147c460e04625d298597155031751a7e09945e545d5d0de83e8535709d852ae" Nov 26 22:45:21 crc kubenswrapper[4903]: E1126 22:45:21.437872 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-tdlsw_openstack-operators(b34e8bed-559a-49d6-b870-c375f36be49f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" podUID="b34e8bed-559a-49d6-b870-c375f36be49f" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.439296 4903 scope.go:117] "RemoveContainer" containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446" Nov 26 22:45:21 crc kubenswrapper[4903]: E1126 22:45:21.439590 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.443992 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" event={"ID":"d4e9967e-dcf0-42c1-94fc-fea289ed54c2","Type":"ContainerStarted","Data":"2f37f3ca2425f3f0cecdedf9dd57ea8726671dfdba241d955b965dce61f734c3"} Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.444803 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.447416 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" event={"ID":"651c7100-bdd0-41e2-8a7f-eaab13dfd391","Type":"ContainerStarted","Data":"d10bebada2ac9a80ae30d376f6b8a8d93e3df5be416018aca626e6e0d3c40f53"} Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.448053 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.450944 4903 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.450970 4903 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7f049a52-ec02-4f6f-9856-00f50e8f0293" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.589248 4903 scope.go:117] "RemoveContainer" containerID="8f387160167c450d6022dd0915096e662c6cef237217b5aaaf59921b82ba050d" Nov 26 22:45:21 crc kubenswrapper[4903]: I1126 22:45:21.746051 4903 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6634d493-9065-414a-b7e7-3f99b8450625" Nov 26 22:45:22 crc kubenswrapper[4903]: I1126 22:45:22.468999 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 22:45:22 crc kubenswrapper[4903]: I1126 22:45:22.469116 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 26 22:45:22 crc kubenswrapper[4903]: I1126 22:45:22.470411 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 26 22:45:22 crc kubenswrapper[4903]: I1126 22:45:22.470451 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" containerID="cri-o://f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae" gracePeriod=30 Nov 26 22:45:22 crc 
kubenswrapper[4903]: I1126 22:45:22.545226 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.029001 4903 scope.go:117] "RemoveContainer" containerID="24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.502489 4903 generic.go:334] "Generic (PLEG): container finished" podID="f58d4082-e69c-44e2-9961-9842cb738869" containerID="f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae" exitCode=2 Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.502860 4903 generic.go:334] "Generic (PLEG): container finished" podID="f58d4082-e69c-44e2-9961-9842cb738869" containerID="1c9519f889959176ce4345582276a277f3e3ea1cc9c922c1a68bc8ce4316fc12" exitCode=1 Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.502627 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerDied","Data":"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae"} Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.502984 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerDied","Data":"1c9519f889959176ce4345582276a277f3e3ea1cc9c922c1a68bc8ce4316fc12"} Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.503019 4903 scope.go:117] "RemoveContainer" containerID="f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.506964 4903 scope.go:117] "RemoveContainer" containerID="1c9519f889959176ce4345582276a277f3e3ea1cc9c922c1a68bc8ce4316fc12" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.516010 4903 generic.go:334] "Generic (PLEG): container finished" podID="ced64189-a8c9-4e13-956b-f69139a9602b" containerID="0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419" exitCode=1 Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.516074 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerDied","Data":"0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419"} Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.517469 4903 scope.go:117] "RemoveContainer" containerID="0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419" Nov 26 22:45:23 crc kubenswrapper[4903]: E1126 22:45:23.519208 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.553637 4903 scope.go:117] "RemoveContainer" containerID="f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae" Nov 26 22:45:23 crc kubenswrapper[4903]: E1126 22:45:23.554426 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae\": container 
with ID starting with f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae not found: ID does not exist" containerID="f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.554491 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae"} err="failed to get container status \"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae\": rpc error: code = NotFound desc = could not find container \"f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae\": container with ID starting with f8f5bf943f03607d238dccf33bd9dd7056c3a36cd9bc2079ff293714a3c2b7ae not found: ID does not exist" Nov 26 22:45:23 crc kubenswrapper[4903]: I1126 22:45:23.554535 4903 scope.go:117] "RemoveContainer" containerID="24ffd058d044769113f4b6185890f4a4c57ff005b352c63ff9c34e9c1de461de" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.029929 4903 scope.go:117] "RemoveContainer" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.030737 4903 scope.go:117] "RemoveContainer" containerID="c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.455601 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.455929 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.456826 4903 scope.go:117] "RemoveContainer" containerID="e147c460e04625d298597155031751a7e09945e545d5d0de83e8535709d852ae" Nov 26 22:45:24 crc kubenswrapper[4903]: E1126 22:45:24.457098 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-tdlsw_openstack-operators(b34e8bed-559a-49d6-b870-c375f36be49f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" podUID="b34e8bed-559a-49d6-b870-c375f36be49f" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.536733 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerStarted","Data":"94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad"} Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.537010 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.544968 4903 generic.go:334] "Generic (PLEG): container finished" podID="f58d4082-e69c-44e2-9961-9842cb738869" containerID="c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921" exitCode=1 Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.545030 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerDied","Data":"c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921"} Nov 26 22:45:24 crc 
kubenswrapper[4903]: I1126 22:45:24.545056 4903 scope.go:117] "RemoveContainer" containerID="1c9519f889959176ce4345582276a277f3e3ea1cc9c922c1a68bc8ce4316fc12" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.545571 4903 scope.go:117] "RemoveContainer" containerID="c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921" Nov 26 22:45:24 crc kubenswrapper[4903]: E1126 22:45:24.545819 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f58d4082-e69c-44e2-9961-9842cb738869)\"" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.552684 4903 generic.go:334] "Generic (PLEG): container finished" podID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerID="00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f" exitCode=1 Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.552783 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerDied","Data":"00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f"} Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.553245 4903 scope.go:117] "RemoveContainer" containerID="00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f" Nov 26 22:45:24 crc kubenswrapper[4903]: E1126 22:45:24.553754 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-57594f7c4c-gdzqb_metallb-system(b5900302-4880-4732-a477-8ed6cf3bfec3)\"" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" Nov 26 22:45:24 crc kubenswrapper[4903]: I1126 22:45:24.663080 4903 scope.go:117] "RemoveContainer" containerID="c623167ba1f18e5ba98ad1f8c49f9107b24293bcf656ecd7df27401a948418cb" Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.054536 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54" Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.443073 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.443137 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.444252 4903 scope.go:117] "RemoveContainer" containerID="9f2013d79e3bdcf5cf56513446bc03890a04a07a43e644047512ea4d009463b2" Nov 26 22:45:25 crc kubenswrapper[4903]: E1126 22:45:25.444740 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-5467d974c6-lpj77_openstack-operators(9239ccfa-cbaa-44b2-a70f-94a281d885f6)\"" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" podUID="9239ccfa-cbaa-44b2-a70f-94a281d885f6" Nov 26 22:45:25 crc 
kubenswrapper[4903]: I1126 22:45:25.580246 4903 generic.go:334] "Generic (PLEG): container finished" podID="6b930423-80e6-4e2c-825f-7deceec090f5" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad" exitCode=1 Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.580323 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerDied","Data":"94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad"} Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.580359 4903 scope.go:117] "RemoveContainer" containerID="2e687205566a6e57fd161c08f633805da9374becf244df76b3177e89d7fde819" Nov 26 22:45:25 crc kubenswrapper[4903]: I1126 22:45:25.581379 4903 scope.go:117] "RemoveContainer" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad" Nov 26 22:45:25 crc kubenswrapper[4903]: E1126 22:45:25.582114 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" Nov 26 22:45:26 crc kubenswrapper[4903]: I1126 22:45:26.609785 4903 scope.go:117] "RemoveContainer" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad" Nov 26 22:45:26 crc kubenswrapper[4903]: E1126 22:45:26.610884 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5" Nov 26 22:45:27 crc kubenswrapper[4903]: I1126 22:45:27.438933 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 22:45:27 crc kubenswrapper[4903]: I1126 22:45:27.440236 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Nov 26 22:45:27 crc kubenswrapper[4903]: I1126 22:45:27.915317 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.070892 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.117958 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.251910 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.254368 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.279025 4903 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.378414 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.387554 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.410327 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-nntjc" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.494376 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.579617 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.664523 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.666065 4903 scope.go:117] "RemoveContainer" containerID="11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.700897 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.701869 4903 scope.go:117] "RemoveContainer" containerID="805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.702553 4903 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.702717 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.712044 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.712080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.712934 4903 scope.go:117] "RemoveContainer" containerID="a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8" Nov 26 22:45:28 crc kubenswrapper[4903]: E1126 22:45:28.713261 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-rtztw_openstack-operators(63feada5-3911-469e-a0b1-539b7aa2948d)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" podUID="63feada5-3911-469e-a0b1-539b7aa2948d" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.731842 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 
22:45:28.732967 4903 scope.go:117] "RemoveContainer" containerID="ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.808487 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.809467 4903 scope.go:117] "RemoveContainer" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.809609 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:45:28 crc kubenswrapper[4903]: E1126 22:45:28.809886 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.827105 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.827806 4903 scope.go:117] "RemoveContainer" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.852024 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.895934 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.896988 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.897034 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.897850 4903 scope.go:117] "RemoveContainer" containerID="0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419" Nov 26 22:45:28 crc kubenswrapper[4903]: E1126 22:45:28.898111 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b" Nov 26 22:45:28 crc kubenswrapper[4903]: I1126 22:45:28.945436 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.015058 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.022824 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.024227 4903 scope.go:117] "RemoveContainer" containerID="cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.050377 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.051488 4903 scope.go:117] "RemoveContainer" containerID="f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.059736 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5bd96487c4-8k4kq" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.076632 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.077947 4903 scope.go:117] "RemoveContainer" containerID="a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.086302 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.086427 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.087279 4903 scope.go:117] "RemoveContainer" containerID="5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.087777 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_openstack-operators(32ccd880-8dfa-46d1-b262-5d10422527ec)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.092407 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.092445 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.093028 4903 scope.go:117] "RemoveContainer" containerID="0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.126309 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.126763 4903 scope.go:117] "RemoveContainer" containerID="19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.184517 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 26 
22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.191991 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.193533 4903 scope.go:117] "RemoveContainer" containerID="ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.247579 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-xxdkf"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.281148 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.281203 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.281570 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-kdmtw"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.282071 4903 scope.go:117] "RemoveContainer" containerID="0e691f467ec275fad3c08ba8876e621a9042e2b03a4a3a1bee4b2ecaaada8121"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.282307 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-6986c4df8b-bkqnw_openstack-operators(3f2ebc07-fbfc-4bd6-9622-63b820e47247)\"" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" podUID="3f2ebc07-fbfc-4bd6-9622-63b820e47247"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.315752 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.376998 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pdkfz"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.403092 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.404067 4903 scope.go:117] "RemoveContainer" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.404476 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.405512 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.443282 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.477104 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.540237 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.541125 4903 scope.go:117] "RemoveContainer" containerID="e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.608860 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gw5wx"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.610296 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.610674 4903 scope.go:117] "RemoveContainer" containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.611872 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.612066 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.617113 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.677976 4903 generic.go:334] "Generic (PLEG): container finished" podID="3e621847-5f60-491a-8e5c-f2fb10df1726" containerID="21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.678194 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerDied","Data":"21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.678279 4903 scope.go:117] "RemoveContainer" containerID="11603c223759063b46c0c3d8e24fc3ba65d8a5f4aa0cf1fa1edf176fd82bd080"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.678950 4903 scope.go:117] "RemoveContainer" containerID="21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.679352 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-6hzbx_openstack-operators(3e621847-5f60-491a-8e5c-f2fb10df1726)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podUID="3e621847-5f60-491a-8e5c-f2fb10df1726"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.690244 4903 generic.go:334] "Generic (PLEG): container finished" podID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" containerID="3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.690294 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerDied","Data":"3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.691000 4903 scope.go:117] "RemoveContainer" containerID="3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.691234 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-vjt6h_openstack-operators(83927c87-ccd7-4b29-97b1-8d03ce0d1b1e)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podUID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.696327 4903 generic.go:334] "Generic (PLEG): container finished" podID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" containerID="508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.696383 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerDied","Data":"508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.696893 4903 scope.go:117] "RemoveContainer" containerID="508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.697129 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.699927 4903 generic.go:334] "Generic (PLEG): container finished" podID="710215b7-5e67-47d8-833f-b8db638cac56" containerID="cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.699961 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerDied","Data":"cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.700293 4903 scope.go:117] "RemoveContainer" containerID="cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.700497 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.718266 4903 generic.go:334] "Generic (PLEG): container finished" podID="e3d89c00-9723-43a3-a1d2-866787257900" containerID="7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.718358 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerDied","Data":"7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.719191 4903 scope.go:117] "RemoveContainer" containerID="7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.719455 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.726702 4903 scope.go:117] "RemoveContainer" containerID="19a533539d7b78a39403e7495bda8184f336e894c58f24c88074d3be815a6b9b"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.731868 4903 generic.go:334] "Generic (PLEG): container finished" podID="d9a3465f-cd49-4af9-a908-58aec0273dbe" containerID="a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.731928 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerDied","Data":"a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.732839 4903 scope.go:117] "RemoveContainer" containerID="a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8"
Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.733113 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-n7krq_openstack-operators(d9a3465f-cd49-4af9-a908-58aec0273dbe)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podUID="d9a3465f-cd49-4af9-a908-58aec0273dbe"
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.736536 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" containerID="d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1" exitCode=1
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.736598 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerDied","Data":"d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1"}
Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.737431 4903 scope.go:117] "RemoveContainer" containerID="d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.737710 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5kmlf_openstack-operators(fcacd7dc-2b08-46d7-98c2-09cf6b6d690b)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podUID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.740110 4903 generic.go:334] "Generic (PLEG): container finished" podID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" containerID="09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270" exitCode=1 Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.740175 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerDied","Data":"09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270"} Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.740572 4903 scope.go:117] "RemoveContainer" containerID="09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.740953 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.743576 4903 generic.go:334] "Generic (PLEG): container finished" podID="edfb7faf-e9af-4ee8-85cd-a11af5812946" containerID="ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07" exitCode=1 Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.743630 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerDied","Data":"ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07"} Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.744490 4903 scope.go:117] "RemoveContainer" containerID="ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.744766 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-vj562_openstack-operators(edfb7faf-e9af-4ee8-85cd-a11af5812946)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podUID="edfb7faf-e9af-4ee8-85cd-a11af5812946" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.748117 4903 generic.go:334] "Generic (PLEG): container finished" podID="0c7b8e09-c502-425e-ac59-b2befd1132fa" containerID="a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1" exitCode=1 Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.748136 4903 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerDied","Data":"a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1"} Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.748646 4903 scope.go:117] "RemoveContainer" containerID="5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.748939 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_openstack-operators(32ccd880-8dfa-46d1-b262-5d10422527ec)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" podUID="32ccd880-8dfa-46d1-b262-5d10422527ec" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.749039 4903 scope.go:117] "RemoveContainer" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.749319 4903 scope.go:117] "RemoveContainer" containerID="a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.749544 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jn49q_openstack-operators(0c7b8e09-c502-425e-ac59-b2befd1132fa)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podUID="0c7b8e09-c502-425e-ac59-b2befd1132fa" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.749847 4903 scope.go:117] "RemoveContainer" containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.750057 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-bwfhp_openstack-operators(f8815d8e-4b34-47b3-98fa-8370205381e0)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" podUID="f8815d8e-4b34-47b3-98fa-8370205381e0" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.779680 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-t95pk" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.786152 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 22:45:29 crc kubenswrapper[4903]: E1126 22:45:29.819920 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3d89c00_9723_43a3_a1d2_866787257900.slice/crio-conmon-7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3d89c00_9723_43a3_a1d2_866787257900.slice/crio-7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcacd7dc_2b08_46d7_98c2_09cf6b6d690b.slice/crio-conmon-d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedfb7faf_e9af_4ee8_85cd_a11af5812946.slice/crio-conmon-ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c3a16ab_252a_4a01_aaab_b273d3d55c0a.slice/crio-conmon-09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83927c87_ccd7_4b29_97b1_8d03ce0d1b1e.slice/crio-conmon-3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c3a16ab_252a_4a01_aaab_b273d3d55c0a.slice/crio-09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c7b8e09_c502_425e_ac59_b2befd1132fa.slice/crio-conmon-a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedfb7faf_e9af_4ee8_85cd_a11af5812946.slice/crio-ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83927c87_ccd7_4b29_97b1_8d03ce0d1b1e.slice/crio-3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcacd7dc_2b08_46d7_98c2_09cf6b6d690b.slice/crio-d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c7b8e09_c502_425e_ac59_b2befd1132fa.slice/crio-a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1.scope\": RecentStats: unable to find data in memory cache]" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.838243 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.860031 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.890903 4903 scope.go:117] "RemoveContainer" containerID="5740b81e1127d2577ec233b943238069cd46de36948c317d12052f468f55bcab" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.947570 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 22:45:29 crc kubenswrapper[4903]: I1126 22:45:29.964158 4903 scope.go:117] "RemoveContainer" containerID="ddf341deb974b36987fb1ada1e0243a8768d0882468f018c1c37c69c176af664" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.018734 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 
22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.050108 4903 scope.go:117] "RemoveContainer" containerID="0346be5eacd9a55ef563fc4f37a01fa2c164dac887d8fd231e049a35a167664d" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.082375 4903 scope.go:117] "RemoveContainer" containerID="805233c52fe5f40f3e88e2d390896c7960ca9c682cbb181d62a654111950c174" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.109212 4903 scope.go:117] "RemoveContainer" containerID="f4ca3bd9b58fdd3120309fa81b0dd7cd6658c254a4c193c43860f8c748366897" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.130642 4903 scope.go:117] "RemoveContainer" containerID="cad76ff4141b0aae8bb72453967c3ae58e2161daaded3db3af2ebcfd94f7b6b7" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.153239 4903 scope.go:117] "RemoveContainer" containerID="a316d08fa9c8e5a120edb61e927cf269d58887a7490641363143db58eae09d65" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.157684 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.177391 4903 scope.go:117] "RemoveContainer" containerID="ab1864b435a34ff3a8668ca0a9a95e3350c7ebc39e6263d3be14220bc746842f" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.279934 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.284158 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.284972 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.314889 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.378822 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.482660 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.509812 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.576573 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.593932 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.707629 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.710043 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.726098 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 
22:45:30.772211 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.785214 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.792450 4903 generic.go:334] "Generic (PLEG): container finished" podID="e0c12217-0537-436e-b0d9-5e5049888268" containerID="7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566" exitCode=1 Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.792521 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerDied","Data":"7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566"} Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.792563 4903 scope.go:117] "RemoveContainer" containerID="b94229746ff897363aeb92d1df8bfbdf6a208f988bb5701bcedbb853a344580c" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.793527 4903 scope.go:117] "RemoveContainer" containerID="7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566" Nov 26 22:45:30 crc kubenswrapper[4903]: E1126 22:45:30.793994 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.797659 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.803065 4903 generic.go:334] "Generic (PLEG): container finished" podID="736b757c-8584-4b59-81d6-ffdd8bbac62c" containerID="2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab" exitCode=1 Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.803125 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerDied","Data":"2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab"} Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.804244 4903 scope.go:117] "RemoveContainer" containerID="2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab" Nov 26 22:45:30 crc kubenswrapper[4903]: E1126 22:45:30.804840 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.851811 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.892981 4903 scope.go:117] "RemoveContainer" containerID="e81c615b7aa0d42c8eac3cf141332857cf63bc69cb2bb47449f591981952b193" Nov 26 
22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.907770 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.934155 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 22:45:30 crc kubenswrapper[4903]: I1126 22:45:30.992262 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-agrneh23ptocm" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.028651 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.073606 4903 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.080140 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-g77bs" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.102282 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.195918 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.371062 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.407141 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.429783 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.433541 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.475370 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.484723 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.487391 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-29bb5" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.492590 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.493485 4903 scope.go:117] "RemoveContainer" containerID="00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f" Nov 26 22:45:31 crc kubenswrapper[4903]: E1126 22:45:31.493851 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-57594f7c4c-gdzqb_metallb-system(b5900302-4880-4732-a477-8ed6cf3bfec3)\"" 
pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.526010 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.562192 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.563012 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.591361 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.646629 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.647349 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.655133 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-w9p4f" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.657017 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-w7mr5" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.686253 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.711146 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.712385 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.712598 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.743052 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.758155 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.794235 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.799533 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.809998 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.839829 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jnmgr" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.892160 4903 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-kw2jj" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.892174 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.920484 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-kl6hf" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.923881 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-kf6h9" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.932401 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.981335 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:45:31 crc kubenswrapper[4903]: I1126 22:45:31.981410 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.011174 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.133142 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.138203 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.202757 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.236156 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.239731 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.292431 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.303290 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.318007 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-5w4jc" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.321669 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.323181 4903 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.332382 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.349983 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.427335 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.435940 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.451301 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.454616 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.454683 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.455997 4903 scope.go:117] "RemoveContainer" containerID="c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921" Nov 26 22:45:32 crc kubenswrapper[4903]: E1126 22:45:32.456467 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f58d4082-e69c-44e2-9961-9842cb738869)\"" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.458624 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.469576 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-gn24s" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.473134 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.482446 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.495259 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.502990 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.527385 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.566318 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.620082 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 
26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.631856 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.639660 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.649476 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.652111 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.656745 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.704422 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.779627 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.830657 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.857129 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-vfdfv" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.898391 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.900922 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.954900 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.958119 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 22:45:32 crc kubenswrapper[4903]: I1126 22:45:32.982824 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-jqrgv" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.007559 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.033914 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.130239 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.131648 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.149123 4903 reflector.go:368] Caches populated for *v1.Secret from 
object-"metallb-system"/"manager-account-dockercfg-xdhsn" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.173889 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.215201 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.304924 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.342260 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.367119 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.399458 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.405808 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.440187 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.480068 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.547529 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.548858 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.550779 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.571580 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.578789 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.605495 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.617358 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.618062 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.651333 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.665837 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.669746 4903 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.705647 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.740370 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.748177 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.808193 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.810871 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.832005 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.840313 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.859657 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.863980 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-t9wfd" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.866089 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.866910 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.879533 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.899378 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.929181 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.940646 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.956928 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 22:45:33 crc kubenswrapper[4903]: I1126 22:45:33.993192 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.023312 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-nrccd" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.051014 4903 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.054273 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-f8gjd" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.060379 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.087591 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.090065 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.094509 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.123548 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.124224 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.167460 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-x9sx2" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.182390 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.212981 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.217045 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.218557 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.222351 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.231103 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.238229 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.286799 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.287172 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-zbvr7" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.300992 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.331312 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.342906 
4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.356573 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.367448 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.375448 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.426749 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.459652 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.492112 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.498680 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-7stmr"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.540489 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.545534 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.554462 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.597408 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.633892 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.639812 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.693453 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jzktn"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.696315 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.713448 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.721058 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.722009 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.739976 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.767290 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.794347 4903 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.804657 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.823164 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.824563 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-6cdkz"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.830312 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.845334 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.847501 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.890224 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.899257 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-93ubnjffr43m1"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.934178 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.940301 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric"
Nov 26 22:45:34 crc kubenswrapper[4903]: I1126 22:45:34.994816 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.028684 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.044235 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.047887 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.060008 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.096259 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-9lhcw"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.111801 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.170983 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.171221 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.217319 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.247246 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.263402 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.266077 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.318011 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.334076 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.361750 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-kr9b7"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.411438 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.454549 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.458849 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.467348 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.519563 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.537790 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.551831 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.598108 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.608322 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
object-"openstack"/"rabbitmq-config-data" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.654314 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.654437 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.656167 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.670637 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.689124 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.698730 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.716772 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.762134 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.765400 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.825032 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.910441 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.941715 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 22:45:35 crc kubenswrapper[4903]: I1126 22:45:35.948151 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.027717 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.028741 4903 scope.go:117] "RemoveContainer" containerID="e147c460e04625d298597155031751a7e09945e545d5d0de83e8535709d852ae" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.081875 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.166836 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.180769 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.186626 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 
22:45:36.199617 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.205828 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.207484 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.219266 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.314019 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.333012 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.340567 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.403913 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.419378 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-n5rgp" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.445258 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.463444 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.466595 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.475975 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.527414 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-pd465" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.545708 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.554925 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.568581 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.588669 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.589137 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.592739 4903 reflector.go:368] Caches populated for 
*v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xmp9c" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.626845 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.636155 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-cxzdg" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.666022 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.759054 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.788388 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-cbr9g" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.789506 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.846784 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.866018 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.891757 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.899178 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" event={"ID":"b34e8bed-559a-49d6-b870-c375f36be49f","Type":"ContainerStarted","Data":"8b44b9bfb4941b55c0d4709018155ed1bc35fab36718ba9010d6c7f0dd55e03b"} Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.899445 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.935201 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.939564 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 22:45:36 crc kubenswrapper[4903]: I1126 22:45:36.984454 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-4g2ls" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.016026 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.029390 4903 scope.go:117] "RemoveContainer" containerID="b61f1738921cbe64573b58376e2be8fc44d76ed21e99391ab498002f537ec2d0" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.039529 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.060604 4903 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.064727 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-4mfcj" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.065385 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-sctsj" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.103144 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.178654 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-lzclj" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.189739 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.241773 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.252128 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.269463 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.315538 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.444166 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.494549 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.505722 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.511643 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.520793 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.571174 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9xvp4" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.601797 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.621426 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.753536 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-wn999" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.778956 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.782322 4903 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.833938 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.858531 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.867776 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.908342 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.910022 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.916498 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-fzd8p" event={"ID":"8248a160-f606-4eaa-9bc1-0e7fcc1ab852","Type":"ContainerStarted","Data":"1afb0d2b6979b5c9292c8cbdd8b6626c68a42fc9dde58e8712b2e2dddff716bc"} Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.935335 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.959923 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.966116 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.975847 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Nov 26 22:45:37 crc kubenswrapper[4903]: I1126 22:45:37.988269 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-m2kjp" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.001654 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.070050 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-tvvts" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.113405 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.130273 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.139056 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-lvs9v" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.178442 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.178502 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 
26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.178828 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-tx8t5" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.179451 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.203288 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.205795 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.218673 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.250651 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.281820 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.296896 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.298360 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.326983 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.340326 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.347763 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.349261 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.389099 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.410388 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.423505 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.449029 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.461345 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.461781 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.469025 4903 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.470817 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.477659 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.483684 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.493230 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.497147 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.499795 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.516454 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.629460 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.646139 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.648401 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.664451 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.665973 4903 scope.go:117] "RemoveContainer" containerID="21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595" Nov 26 22:45:38 crc kubenswrapper[4903]: E1126 22:45:38.666606 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-6hzbx_openstack-operators(3e621847-5f60-491a-8e5c-f2fb10df1726)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podUID="3e621847-5f60-491a-8e5c-f2fb10df1726" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.667810 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.677038 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.679084 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.700549 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.701544 4903 scope.go:117] "RemoveContainer" containerID="a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8" Nov 26 22:45:38 crc kubenswrapper[4903]: E1126 22:45:38.701861 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-n7krq_openstack-operators(d9a3465f-cd49-4af9-a908-58aec0273dbe)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podUID="d9a3465f-cd49-4af9-a908-58aec0273dbe" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.731790 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.732869 4903 scope.go:117] "RemoveContainer" containerID="cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704" Nov 26 22:45:38 crc kubenswrapper[4903]: E1126 22:45:38.733246 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.754132 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.789248 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.808373 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.809397 4903 scope.go:117] "RemoveContainer" containerID="7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566" Nov 26 22:45:38 crc kubenswrapper[4903]: E1126 22:45:38.809764 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.826974 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.827861 4903 scope.go:117] "RemoveContainer" containerID="508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c" Nov 26 22:45:38 crc kubenswrapper[4903]: E1126 22:45:38.828153 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.860930 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.871145 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.881016 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.921311 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-f2kt7" Nov 26 22:45:38 crc kubenswrapper[4903]: I1126 22:45:38.957038 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.002385 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nc6m5" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.007453 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.008616 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.023016 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.023985 4903 scope.go:117] "RemoveContainer" containerID="09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.024327 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.028621 4903 scope.go:117] "RemoveContainer" containerID="9f2013d79e3bdcf5cf56513446bc03890a04a07a43e644047512ea4d009463b2" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.035328 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.046999 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.047250 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.049925 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.050026 4903 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.050838 4903 scope.go:117] "RemoveContainer" containerID="d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.051246 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5kmlf_openstack-operators(fcacd7dc-2b08-46d7-98c2-09cf6b6d690b)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podUID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.078435 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.078717 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.080073 4903 scope.go:117] "RemoveContainer" containerID="ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.080395 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-vj562_openstack-operators(edfb7faf-e9af-4ee8-85cd-a11af5812946)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podUID="edfb7faf-e9af-4ee8-85cd-a11af5812946" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.085573 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-lhfsx" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.092169 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.092977 4903 scope.go:117] "RemoveContainer" containerID="7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.093211 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.106532 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.126653 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.130453 4903 scope.go:117] "RemoveContainer" containerID="3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.131591 4903 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-vjt6h_openstack-operators(83927c87-ccd7-4b29-97b1-8d03ce0d1b1e)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podUID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.140249 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.191984 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.193025 4903 scope.go:117] "RemoveContainer" containerID="a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.193284 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jn49q_openstack-operators(0c7b8e09-c502-425e-ac59-b2befd1132fa)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podUID="0c7b8e09-c502-425e-ac59-b2befd1132fa" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.204835 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.237790 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.247127 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-q7t2v" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.250678 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.250891 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qhzgz" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.255074 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.289499 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.290848 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.303192 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.327522 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-r5bq9" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.341153 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.341405 4903 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.369531 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.406561 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.464072 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.474761 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.513361 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-j85km" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.522972 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.541242 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.542564 4903 scope.go:117] "RemoveContainer" containerID="2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab" Nov 26 22:45:39 crc kubenswrapper[4903]: E1126 22:45:39.542899 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.560626 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.562087 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.572995 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-t9227" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.594792 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-4ncq2" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.620059 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-wxx6f" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.631857 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.657243 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.685210 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.749421 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.750780 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.774603 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.823611 4903 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.857195 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hdb9h"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.866893 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.902831 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.903265 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.927960 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.945026 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.949706 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" event={"ID":"9239ccfa-cbaa-44b2-a70f-94a281d885f6","Type":"ContainerStarted","Data":"12a05e1191bf4cf7a95d8583ec95f1af50adc307b306bbeed4d24e1b806452f3"}
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.953930 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77"
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.955762 4903 generic.go:334] "Generic (PLEG): container finished" podID="580a58c8-ce17-4d85-991a-e51d3eb639b3" containerID="3426b2d0e3f7e8a4e497ee7604da437b01133a3ef07a12e33c2e751479b1b5db" exitCode=1
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.955794 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" event={"ID":"580a58c8-ce17-4d85-991a-e51d3eb639b3","Type":"ContainerDied","Data":"3426b2d0e3f7e8a4e497ee7604da437b01133a3ef07a12e33c2e751479b1b5db"}
Nov 26 22:45:39 crc kubenswrapper[4903]: I1126 22:45:39.957514 4903 scope.go:117] "RemoveContainer" containerID="3426b2d0e3f7e8a4e497ee7604da437b01133a3ef07a12e33c2e751479b1b5db"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.019849 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.029222 4903 scope.go:117] "RemoveContainer" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad"
Nov 26 22:45:40 crc kubenswrapper[4903]: E1126 22:45:40.029483 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-nz8x4_openstack-operators(6b930423-80e6-4e2c-825f-7deceec090f5)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" podUID="6b930423-80e6-4e2c-825f-7deceec090f5"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.029887 4903 scope.go:117] "RemoveContainer" containerID="0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.030093 4903 scope.go:117] "RemoveContainer" containerID="0e691f467ec275fad3c08ba8876e621a9042e2b03a4a3a1bee4b2ecaaada8121"
Nov 26 22:45:40 crc kubenswrapper[4903]: E1126 22:45:40.030202 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-bm7r7_openstack-operators(ced64189-a8c9-4e13-956b-f69139a9602b)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" podUID="ced64189-a8c9-4e13-956b-f69139a9602b"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.040010 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.062378 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-2pcg5"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.107987 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.118076 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.153988 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.254150 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-phnjg"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.345800 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.363944 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.364198 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.396084 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.396838 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.428718 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.471627 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.495576 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.504236 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-kfg7g"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.506427 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.511817 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.519861 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.601594 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-jzrr8"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.634675 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-wvnqm"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.640284 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.646562 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.700960 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.702107 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.703542 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.723947 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.780192 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.829355 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.835927 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.950396 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.952295 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls"
pod="cert-manager/cert-manager-cainjector-7f985d654d-dfvzf" event={"ID":"580a58c8-ce17-4d85-991a-e51d3eb639b3","Type":"ContainerStarted","Data":"967fe8cd22f044ffcb129a0904489b3c9207f2ace79c4b491944b372719d29cd"} Nov 26 22:45:40 crc kubenswrapper[4903]: I1126 22:45:40.974241 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" event={"ID":"3f2ebc07-fbfc-4bd6-9622-63b820e47247","Type":"ContainerStarted","Data":"3f14d321faed132e7e5a857a2c1c20389b8df86923d8dc152cf5427086da9491"} Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.029219 4903 scope.go:117] "RemoveContainer" containerID="76b03db273f5b2435589e2e2768d1c9f1f333434138c04deec31f17ae71c3446" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.034155 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.060188 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-b97hq" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.090443 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.149295 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.162219 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.181149 4903 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.187875 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.201155 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jvntk" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.230744 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.258901 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.320579 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.335440 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.397863 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-d6t7w" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.432547 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.472855 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.474676 4903 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.475421 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.554856 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.562055 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.571711 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vtsfk" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.576746 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.883740 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.919352 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.981019 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-7scs2" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.984070 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-bh2qf" Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.991910 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" event={"ID":"f8815d8e-4b34-47b3-98fa-8370205381e0","Type":"ContainerStarted","Data":"c03bbead11ad3620f48974b3a3c159bf4c96f636f1694e93921b35e2b633aa37"} Nov 26 22:45:41 crc kubenswrapper[4903]: I1126 22:45:41.993857 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.029298 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.090510 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fx5z4" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.114424 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.160512 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.204743 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.374027 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.388794 4903 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.403213 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.434734 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.443728 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-gc2r8" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.453763 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6jfkt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.462501 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.479467 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.526385 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.531859 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-wn2mx" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.533871 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.562383 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.617864 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 26 22:45:42 crc kubenswrapper[4903]: I1126 22:45:42.924484 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.015899 4903 generic.go:334] "Generic (PLEG): container finished" podID="de11c064-60b1-4f96-a316-bc903f061766" containerID="7021f26343b9006bc74dda37289ee15e2590e70ff5c00d899c335ede8e43dfab" exitCode=1 Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.017834 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" event={"ID":"de11c064-60b1-4f96-a316-bc903f061766","Type":"ContainerDied","Data":"7021f26343b9006bc74dda37289ee15e2590e70ff5c00d899c335ede8e43dfab"} Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.018842 4903 scope.go:117] "RemoveContainer" containerID="7021f26343b9006bc74dda37289ee15e2590e70ff5c00d899c335ede8e43dfab" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.029897 4903 scope.go:117] "RemoveContainer" containerID="5b840ea3a5fb538ffaba96a84254e79fc8370cfc2d0eed26c6b3310175ed8eec" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.078368 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.310912 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 22:45:43 crc 
kubenswrapper[4903]: I1126 22:45:43.321186 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.473892 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.510384 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.515777 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.607673 4903 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.608658 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.701884 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-2jtg6" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.708478 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-e9tj3rmasj09h" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.743427 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.744580 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 22:45:43 crc kubenswrapper[4903]: I1126 22:45:43.911548 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.028917 4903 scope.go:117] "RemoveContainer" containerID="a8b78afbb892ea12c18116123ca011a0ae8b858f3430350e37fbd74f2f3239c8" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.048538 4903 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-qq9fm" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.051350 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" event={"ID":"32ccd880-8dfa-46d1-b262-5d10422527ec","Type":"ContainerStarted","Data":"d12d199f3bee3c41af0b0f7c2e96f7261da4b52e9f6b495ccd03b9684fa763ed"} Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.051403 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-qdk8j" event={"ID":"de11c064-60b1-4f96-a316-bc903f061766","Type":"ContainerStarted","Data":"444f297b121a95e7d96eef63b33ea9c42410f373934365831a9254f328d93e56"} Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.051727 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.310272 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.463788 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/infra-operator-controller-manager-57548d458d-tdlsw" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.525277 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.573468 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-bg9qc" Nov 26 22:45:44 crc kubenswrapper[4903]: I1126 22:45:44.770519 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 26 22:45:45 crc kubenswrapper[4903]: I1126 22:45:45.029070 4903 scope.go:117] "RemoveContainer" containerID="00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f" Nov 26 22:45:45 crc kubenswrapper[4903]: I1126 22:45:45.046817 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" event={"ID":"63feada5-3911-469e-a0b1-539b7aa2948d","Type":"ContainerStarted","Data":"fc984c009dcf9b951081f619cf809c04715fd95351ee4d5184f383f3176d90ae"} Nov 26 22:45:45 crc kubenswrapper[4903]: I1126 22:45:45.047782 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:45:45 crc kubenswrapper[4903]: I1126 22:45:45.185129 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 22:45:45 crc kubenswrapper[4903]: I1126 22:45:45.453474 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5467d974c6-lpj77" Nov 26 22:45:46 crc kubenswrapper[4903]: I1126 22:45:46.063570 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerStarted","Data":"ca84a094eae0897716516c2c819744e6ec3daf1c9d41e3ba2a9b4082f4689bcf"} Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.029646 4903 scope.go:117] "RemoveContainer" containerID="c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.665039 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.667460 4903 scope.go:117] "RemoveContainer" containerID="21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595" Nov 26 22:45:48 crc kubenswrapper[4903]: E1126 22:45:48.667961 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-6hzbx_openstack-operators(3e621847-5f60-491a-8e5c-f2fb10df1726)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" podUID="3e621847-5f60-491a-8e5c-f2fb10df1726" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.701228 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.702405 4903 scope.go:117] "RemoveContainer" 
containerID="a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8" Nov 26 22:45:48 crc kubenswrapper[4903]: E1126 22:45:48.702887 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-n7krq_openstack-operators(d9a3465f-cd49-4af9-a908-58aec0273dbe)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" podUID="d9a3465f-cd49-4af9-a908-58aec0273dbe" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.731741 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.732889 4903 scope.go:117] "RemoveContainer" containerID="cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704" Nov 26 22:45:48 crc kubenswrapper[4903]: E1126 22:45:48.733359 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-shqxg_openstack-operators(710215b7-5e67-47d8-833f-b8db638cac56)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" podUID="710215b7-5e67-47d8-833f-b8db638cac56" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.809889 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.810900 4903 scope.go:117] "RemoveContainer" containerID="7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566" Nov 26 22:45:48 crc kubenswrapper[4903]: E1126 22:45:48.811248 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-x59hr_openstack-operators(e0c12217-0537-436e-b0d9-5e5049888268)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" podUID="e0c12217-0537-436e-b0d9-5e5049888268" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.827631 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:45:48 crc kubenswrapper[4903]: I1126 22:45:48.828616 4903 scope.go:117] "RemoveContainer" containerID="508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c" Nov 26 22:45:48 crc kubenswrapper[4903]: E1126 22:45:48.828980 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-v4b66_openstack-operators(34b48ba8-04a0-463d-9e31-b7c13127ce9c)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" podUID="34b48ba8-04a0-463d-9e31-b7c13127ce9c" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.023121 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.024148 4903 scope.go:117] "RemoveContainer" 
containerID="09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.024487 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-t5gqj_openstack-operators(9c3a16ab-252a-4a01-aaab-b273d3d55c0a)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" podUID="9c3a16ab-252a-4a01-aaab-b273d3d55c0a" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.050740 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.051268 4903 scope.go:117] "RemoveContainer" containerID="d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.051577 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5kmlf_openstack-operators(fcacd7dc-2b08-46d7-98c2-09cf6b6d690b)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" podUID="fcacd7dc-2b08-46d7-98c2-09cf6b6d690b" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.076081 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.077254 4903 scope.go:117] "RemoveContainer" containerID="ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.077601 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-vj562_openstack-operators(edfb7faf-e9af-4ee8-85cd-a11af5812946)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" podUID="edfb7faf-e9af-4ee8-85cd-a11af5812946" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.088831 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-pzwmk" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.092421 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.093267 4903 scope.go:117] "RemoveContainer" containerID="7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.093763 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-kxg8s_openstack-operators(e3d89c00-9723-43a3-a1d2-866787257900)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" podUID="e3d89c00-9723-43a3-a1d2-866787257900" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.126848 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.128065 4903 scope.go:117] "RemoveContainer" containerID="3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.128538 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-vjt6h_openstack-operators(83927c87-ccd7-4b29-97b1-8d03ce0d1b1e)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" podUID="83927c87-ccd7-4b29-97b1-8d03ce0d1b1e" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.180771 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerStarted","Data":"35037d56510b3726b4dea82db297200bcd800204b1720350b319b85a81adcfd5"} Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.181051 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.191368 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.192555 4903 scope.go:117] "RemoveContainer" containerID="a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.193199 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-jn49q_openstack-operators(0c7b8e09-c502-425e-ac59-b2befd1132fa)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" podUID="0c7b8e09-c502-425e-ac59-b2befd1132fa" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.282070 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.289356 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6986c4df8b-bkqnw" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.540770 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.541447 4903 scope.go:117] "RemoveContainer" containerID="2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab" Nov 26 22:45:49 crc kubenswrapper[4903]: E1126 22:45:49.541828 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-2h7mb_openstack-operators(736b757c-8584-4b59-81d6-ffdd8bbac62c)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" podUID="736b757c-8584-4b59-81d6-ffdd8bbac62c" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.611709 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bwfhp" Nov 26 22:45:49 crc kubenswrapper[4903]: I1126 22:45:49.915473 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Nov 26 22:45:50 crc kubenswrapper[4903]: I1126 22:45:50.872652 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 22:45:51 crc kubenswrapper[4903]: I1126 22:45:51.002265 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Nov 26 22:45:51 crc kubenswrapper[4903]: I1126 22:45:51.029338 4903 scope.go:117] "RemoveContainer" containerID="94a8b1243ee0dd0e214c43ce7484f1a52dd5b79532016eb06bf38b170a07d1ad" Nov 26 22:45:51 crc kubenswrapper[4903]: I1126 22:45:51.492483 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:45:52 crc kubenswrapper[4903]: I1126 22:45:52.243316 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" event={"ID":"6b930423-80e6-4e2c-825f-7deceec090f5","Type":"ContainerStarted","Data":"3bffff2751406430b4e6101e4e32ec421476d1f8f9987b405df812e1291596d8"} Nov 26 22:45:52 crc kubenswrapper[4903]: I1126 22:45:52.246378 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:45:53 crc kubenswrapper[4903]: I1126 22:45:53.028229 4903 scope.go:117] "RemoveContainer" containerID="0df6f2cd8131e0da17bc972601caed560cc5e8a238ffdbe51297b1e4fdac1419" Nov 26 22:45:54 crc kubenswrapper[4903]: I1126 22:45:54.088678 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 22:45:54 crc kubenswrapper[4903]: I1126 22:45:54.270373 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" event={"ID":"ced64189-a8c9-4e13-956b-f69139a9602b","Type":"ContainerStarted","Data":"0a841ef6b6558190aca2ad2511090a96bfbdcd2f73c121fdde1084fc2af6e0ce"} Nov 26 22:45:54 crc kubenswrapper[4903]: I1126 22:45:54.270744 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:45:55 crc kubenswrapper[4903]: I1126 22:45:55.132129 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 22:45:55 crc kubenswrapper[4903]: I1126 22:45:55.310533 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-7mft4" Nov 26 22:45:58 crc kubenswrapper[4903]: I1126 22:45:58.059834 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 22:45:58 crc kubenswrapper[4903]: I1126 22:45:58.141928 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 22:45:58 crc kubenswrapper[4903]: I1126 22:45:58.714934 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-rtztw" Nov 26 22:45:58 crc kubenswrapper[4903]: I1126 22:45:58.900556 4903 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-bm7r7" Nov 26 22:45:59 crc kubenswrapper[4903]: I1126 22:45:59.406108 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-nz8x4" Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.031416 4903 scope.go:117] "RemoveContainer" containerID="508331bb4524a2553924de70f3100d3610f7409e92880d284575e4772722ff7c" Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.242003 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.379168 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" event={"ID":"34b48ba8-04a0-463d-9e31-b7c13127ce9c","Type":"ContainerStarted","Data":"c1997708d07b50f7c211926278d84cec2543cb0bae869f944afb9f7e2c7336e6"} Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.379881 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.432363 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 26 22:46:00 crc kubenswrapper[4903]: I1126 22:46:00.809499 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.029670 4903 scope.go:117] "RemoveContainer" containerID="ecf3375c398da58bf8c49d9aaaf8f1fac3a6c2212ce5b2d2bcfedd97d0a60a07" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.030079 4903 scope.go:117] "RemoveContainer" containerID="09e71a3b62b72dece188cb57a404bf3c90dfb7fc3d285a6e5a11476234906270" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.030259 4903 scope.go:117] "RemoveContainer" containerID="cee97a990fe10c493e806de1e8e6fb1979d5c20f9a0542e828af7a584f429704" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.030680 4903 scope.go:117] "RemoveContainer" containerID="7d5aa399b04088fbdb7b1b51a148b4c5921e1db0aab0ae7d5e408bdd7a74719a" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.031450 4903 scope.go:117] "RemoveContainer" containerID="2f66cf166e7446809d7d31a5cadb31b7f839d1c1a6e64600adc189f3e4dba9ab" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.981620 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.981911 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.981957 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.982831 4903 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:46:01 crc kubenswrapper[4903]: I1126 22:46:01.982898 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36" gracePeriod=600 Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.058385 4903 scope.go:117] "RemoveContainer" containerID="21baeec6c1765a561603ca595b5ac08c3c11f8791f1edc71af07817a48451595" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.058503 4903 scope.go:117] "RemoveContainer" containerID="a7d8efc3ff63907f355298e4e0285b04cf5f37d74a0c2b9eac228f13b09e48f8" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.265461 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.404403 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" event={"ID":"736b757c-8584-4b59-81d6-ffdd8bbac62c","Type":"ContainerStarted","Data":"2f4c1418fc272039838948072e4998b18989e076e967ec9b730f5e6be3ab9a2a"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.405159 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.410338 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" event={"ID":"710215b7-5e67-47d8-833f-b8db638cac56","Type":"ContainerStarted","Data":"8aa4710c71673032bd03e8721b224a911a4c61138944f3e692c4c8f9763566f0"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.411224 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.414306 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" event={"ID":"9c3a16ab-252a-4a01-aaab-b273d3d55c0a","Type":"ContainerStarted","Data":"539c598611b4eeb6e63fc2fea2d84c050001e0e6bae70a0e726d35d8014ecfa0"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.415066 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.429232 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36" exitCode=0 Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.429317 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" 
event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.429350 4903 scope.go:117] "RemoveContainer" containerID="847f128b302f65a898dbd9690c3fc64381891e05dcde345636fcc588de735302" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.434702 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" event={"ID":"edfb7faf-e9af-4ee8-85cd-a11af5812946","Type":"ContainerStarted","Data":"005c018a072f89e34c195943bf01a1e1caa65c7f69f1712e37c088d97a990d3c"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.435729 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.438682 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" event={"ID":"e3d89c00-9723-43a3-a1d2-866787257900","Type":"ContainerStarted","Data":"5757bf32185f6b257c20535e500f8bc9d4c9ebe3c628cf539822e9e8a7e81d2f"} Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.439178 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:46:02 crc kubenswrapper[4903]: I1126 22:46:02.460120 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.028303 4903 scope.go:117] "RemoveContainer" containerID="3ca95f237818586b3409d73648fa10d199f6d1c91c86d75b2f7ee72177a9382f" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.028785 4903 scope.go:117] "RemoveContainer" containerID="d965f7d0d0f2c1e8bf7ffd88de09c8de8670303acf2a49ce60953ae7f0523de1" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.449728 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" event={"ID":"fcacd7dc-2b08-46d7-98c2-09cf6b6d690b","Type":"ContainerStarted","Data":"69ff1cd0bb34d6fd846208c4096413b557633ecdef502f28276c48135076642e"} Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.451999 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.455325 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"} Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.459030 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" event={"ID":"3e621847-5f60-491a-8e5c-f2fb10df1726","Type":"ContainerStarted","Data":"4b2a9ba298777eff203b5ac799064d6d05cce980d7155088e250b13c7cad98a0"} Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.459498 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.469660 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" event={"ID":"d9a3465f-cd49-4af9-a908-58aec0273dbe","Type":"ContainerStarted","Data":"4deded7db55a9d1029522fa14bb1af2a3061eab284d5d683801d090a472ebb78"} Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.469986 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:46:03 crc kubenswrapper[4903]: I1126 22:46:03.473655 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" event={"ID":"83927c87-ccd7-4b29-97b1-8d03ce0d1b1e","Type":"ContainerStarted","Data":"eb8a8444a06d512b16ade6c32b1e4cbc3989a6dedb1cce3707f6ffa0a1eb52ff"} Nov 26 22:46:04 crc kubenswrapper[4903]: I1126 22:46:04.029207 4903 scope.go:117] "RemoveContainer" containerID="a779b9a2b9f8b4afffdfc9236ebee5724631ba3fa64507e587152db4f60112c1" Nov 26 22:46:04 crc kubenswrapper[4903]: I1126 22:46:04.041448 4903 scope.go:117] "RemoveContainer" containerID="7b65d18b731c0b86b837cb5e70b87b20f0dd73061b139b0e54763e2d727b6566" Nov 26 22:46:04 crc kubenswrapper[4903]: I1126 22:46:04.487917 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" event={"ID":"e0c12217-0537-436e-b0d9-5e5049888268","Type":"ContainerStarted","Data":"c63cdf42fd150460d3407ef9e67e264becccb19bcbaf3547f05c62067c2586d6"} Nov 26 22:46:04 crc kubenswrapper[4903]: I1126 22:46:04.488262 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:46:04 crc kubenswrapper[4903]: I1126 22:46:04.490925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" event={"ID":"0c7b8e09-c502-425e-ac59-b2befd1132fa","Type":"ContainerStarted","Data":"b6ee955e31ca26c9d5dbcdd3f35a1299725220ca8b717afb197087eba435a58f"} Nov 26 22:46:06 crc kubenswrapper[4903]: I1126 22:46:06.991284 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 22:46:07 crc kubenswrapper[4903]: I1126 22:46:07.139874 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Nov 26 22:46:07 crc kubenswrapper[4903]: I1126 22:46:07.278281 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.476608 4903 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.485604 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bxpxf" podStartSLOduration=69.83540776 podStartE2EDuration="1m15.485585453s" podCreationTimestamp="2025-11-26 22:44:53 +0000 UTC" firstStartedPulling="2025-11-26 22:44:55.638908355 +0000 UTC m=+1424.329143275" lastFinishedPulling="2025-11-26 22:45:01.289086028 +0000 UTC m=+1429.979320968" observedRunningTime="2025-11-26 22:45:17.942386423 +0000 UTC m=+1446.632621343" watchObservedRunningTime="2025-11-26 22:46:08.485585453 +0000 UTC m=+1497.175820363" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.490898 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/ceilometer-0","openshift-kube-apiserver/kube-apiserver-crc","openstack/aodh-0"] Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.490949 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0","openstack/aodh-0","openshift-kube-apiserver/kube-apiserver-crc","openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm"] Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491452 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-listener" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491469 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-listener" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491532 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="proxy-httpd" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491540 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="proxy-httpd" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491561 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-notification-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491567 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-notification-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491584 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-central-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491590 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-central-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491617 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="sg-core" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491624 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="sg-core" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491645 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" containerName="installer" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491673 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" containerName="installer" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491709 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-evaluator" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491726 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-evaluator" Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491749 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-notifier" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491756 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-notifier" 
Nov 26 22:46:08 crc kubenswrapper[4903]: E1126 22:46:08.491765 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-api" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.491771 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-api" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492006 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="sg-core" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492017 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d22f4962-7fe8-4565-92df-3316c71e2079" containerName="installer" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492027 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-notifier" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492040 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-central-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492055 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="proxy-httpd" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492066 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-listener" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492075 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-evaluator" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492088 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" containerName="ceilometer-notification-agent" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.492109 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" containerName="aodh-api" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.500123 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.504251 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.504263 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.524401 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.527767 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pkbb4" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.530392 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.530620 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.530741 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.531836 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.533055 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.533280 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.535258 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.535887 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.551795 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.555645 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=50.55562647 podStartE2EDuration="50.55562647s" podCreationTimestamp="2025-11-26 22:45:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:46:08.525927674 +0000 UTC m=+1497.216162594" watchObservedRunningTime="2025-11-26 22:46:08.55562647 +0000 UTC m=+1497.245861380" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.592894 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.592947 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.592971 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " 
pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.592996 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593039 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fks25\" (UniqueName: \"kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593058 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593073 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593088 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593104 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfbt5\" (UniqueName: \"kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593124 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593138 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593170 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593225 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djqvp\" (UniqueName: \"kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593243 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593269 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.593303 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.677129 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-6hzbx" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694757 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694810 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694832 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694864 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694905 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fks25\" (UniqueName: \"kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694922 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694937 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694953 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694970 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfbt5\" (UniqueName: \"kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.694988 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695002 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695036 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695102 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djqvp\" (UniqueName: \"kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695120 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695143 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: 
\"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.695182 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.700114 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.705853 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.707331 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.708314 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.709016 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.710398 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.722167 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-n7krq" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.723338 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.723445 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.723874 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.723931 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djqvp\" (UniqueName: \"kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.724449 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-hvxfr" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.727069 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfbt5\" (UniqueName: \"kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.727575 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.728349 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.735928 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data\") pod \"aodh-0\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.736005 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data\") pod \"ceilometer-0\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.737109 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-shqxg" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.754374 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fks25\" (UniqueName: \"kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25\") pod \"collect-profiles-29403285-qjtgm\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.824327 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.826947 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.842032 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-v4b66" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.852222 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:46:08 crc kubenswrapper[4903]: I1126 22:46:08.863386 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.027108 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-t5gqj" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.058682 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5kmlf" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.088312 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-vj562" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.103236 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-kxg8s" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.130084 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.146807 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vjt6h" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.192135 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.200346 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-jn49q" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.526784 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:09 crc kubenswrapper[4903]: W1126 22:46:09.532428 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd127bade_9b0b_4d82_bfa2_656c4986fb18.slice/crio-5cd91f1c22345cac742578da141c236d57ede672aadfca5dff2dabf3f0f5afc9 WatchSource:0}: Error finding container 5cd91f1c22345cac742578da141c236d57ede672aadfca5dff2dabf3f0f5afc9: Status 404 returned error can't find the container with id 5cd91f1c22345cac742578da141c236d57ede672aadfca5dff2dabf3f0f5afc9 Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.543080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-2h7mb" Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.586637 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerStarted","Data":"5cd91f1c22345cac742578da141c236d57ede672aadfca5dff2dabf3f0f5afc9"} Nov 26 22:46:09 crc kubenswrapper[4903]: W1126 22:46:09.613246 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod380938eb_7cfc_4a1c_8710_bc2279ca6b82.slice/crio-3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422 WatchSource:0}: Error finding container 3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422: Status 404 returned error can't find the container with id 3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422 Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.620397 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.640164 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm"] Nov 26 22:46:09 crc kubenswrapper[4903]: I1126 22:46:09.952394 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-ljln7" Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.045978 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05d39c3e-b34f-42ae-ad74-b18e1fd0fced" path="/var/lib/kubelet/pods/05d39c3e-b34f-42ae-ad74-b18e1fd0fced/volumes" Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.047223 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6253c12a-e41a-4476-abda-3b3b7ff084b3" path="/var/lib/kubelet/pods/6253c12a-e41a-4476-abda-3b3b7ff084b3/volumes" Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.629660 4903 generic.go:334] "Generic (PLEG): container finished" podID="380938eb-7cfc-4a1c-8710-bc2279ca6b82" containerID="992cc963df3f71ef88966f1cfbd1be25ac845ce8c8601ee917b68f5d10de91ba" exitCode=0 Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.629893 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" event={"ID":"380938eb-7cfc-4a1c-8710-bc2279ca6b82","Type":"ContainerDied","Data":"992cc963df3f71ef88966f1cfbd1be25ac845ce8c8601ee917b68f5d10de91ba"} Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.629945 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" event={"ID":"380938eb-7cfc-4a1c-8710-bc2279ca6b82","Type":"ContainerStarted","Data":"3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422"} Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.634764 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerStarted","Data":"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72"} Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.644043 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerStarted","Data":"07aa88fbd138008906bb8b408c2b6e39c86a742c7d941e733536fd097b8c9fc8"} Nov 26 22:46:10 crc kubenswrapper[4903]: I1126 22:46:10.644088 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerStarted","Data":"5af0ab450328c72f31506a298fedfb7bd933b988cc10c84abfd5f705ecb72309"} Nov 26 22:46:11 crc kubenswrapper[4903]: I1126 22:46:11.656530 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerStarted","Data":"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620"} Nov 26 22:46:11 crc kubenswrapper[4903]: I1126 22:46:11.658854 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerStarted","Data":"838928e083bc7d4d767dd84268306d7317180ad54fab3966b563aebea4741396"} Nov 26 22:46:11 crc kubenswrapper[4903]: I1126 22:46:11.824579 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.081805 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.109717 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fks25\" (UniqueName: \"kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25\") pod \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.109774 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume\") pod \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.109831 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume\") pod \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\" (UID: \"380938eb-7cfc-4a1c-8710-bc2279ca6b82\") " Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.131132 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume" (OuterVolumeSpecName: "config-volume") pod "380938eb-7cfc-4a1c-8710-bc2279ca6b82" (UID: "380938eb-7cfc-4a1c-8710-bc2279ca6b82"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.141135 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25" (OuterVolumeSpecName: "kube-api-access-fks25") pod "380938eb-7cfc-4a1c-8710-bc2279ca6b82" (UID: "380938eb-7cfc-4a1c-8710-bc2279ca6b82"). InnerVolumeSpecName "kube-api-access-fks25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.164852 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "380938eb-7cfc-4a1c-8710-bc2279ca6b82" (UID: "380938eb-7cfc-4a1c-8710-bc2279ca6b82"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.213266 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fks25\" (UniqueName: \"kubernetes.io/projected/380938eb-7cfc-4a1c-8710-bc2279ca6b82-kube-api-access-fks25\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.213305 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/380938eb-7cfc-4a1c-8710-bc2279ca6b82-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.213315 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/380938eb-7cfc-4a1c-8710-bc2279ca6b82-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.711028 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" event={"ID":"380938eb-7cfc-4a1c-8710-bc2279ca6b82","Type":"ContainerDied","Data":"3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422"} Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.711459 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3dc3adf45e5cddd9be2fc4ce8988eed83f88fc12c9e7d3524e1291d32c7e2422" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.711525 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm" Nov 26 22:46:12 crc kubenswrapper[4903]: I1126 22:46:12.736005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerStarted","Data":"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195"} Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.408050 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:13 crc kubenswrapper[4903]: E1126 22:46:13.408862 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="380938eb-7cfc-4a1c-8710-bc2279ca6b82" containerName="collect-profiles" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.408881 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="380938eb-7cfc-4a1c-8710-bc2279ca6b82" containerName="collect-profiles" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.409134 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="380938eb-7cfc-4a1c-8710-bc2279ca6b82" containerName="collect-profiles" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.410856 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.443969 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.562463 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.562915 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.562962 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzpxk\" (UniqueName: \"kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.611753 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.614182 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.626264 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.668140 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.668197 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzpxk\" (UniqueName: \"kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.668284 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.668610 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.668625 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.690499 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzpxk\" (UniqueName: \"kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk\") pod \"certified-operators-ljrkr\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.754377 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerStarted","Data":"e153cc7373dc20867bbaa6f0803f381f4b712ec576adda56947e43428d81ed4d"} Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.755364 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerStarted","Data":"f626787be4ced39e2e9fba37b148c92cb7728b70e29552779650c4dd06e40a83"} Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.771056 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " 
pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.771133 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfscv\" (UniqueName: \"kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.771555 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.789543 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=30.981143044 podStartE2EDuration="34.789495965s" podCreationTimestamp="2025-11-26 22:45:39 +0000 UTC" firstStartedPulling="2025-11-26 22:46:09.60550919 +0000 UTC m=+1498.295744110" lastFinishedPulling="2025-11-26 22:46:13.413862131 +0000 UTC m=+1502.104097031" observedRunningTime="2025-11-26 22:46:13.77623107 +0000 UTC m=+1502.466465980" watchObservedRunningTime="2025-11-26 22:46:13.789495965 +0000 UTC m=+1502.479730875" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.873741 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.874102 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.874158 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfscv\" (UniqueName: \"kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.874330 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.874638 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.884594 4903 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.893042 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfscv\" (UniqueName: \"kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv\") pod \"community-operators-29lbn\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:13 crc kubenswrapper[4903]: I1126 22:46:13.963769 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.672350 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.726499 4903 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.727062 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4" gracePeriod=5 Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.803008 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:14 crc kubenswrapper[4903]: W1126 22:46:14.804265 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e57af0e_bec3_4e7a_9644_52a660107434.slice/crio-d327196e5f9dfcb09980cc3a644b88ff269de32c732e67b6b1c109b41a683b98 WatchSource:0}: Error finding container d327196e5f9dfcb09980cc3a644b88ff269de32c732e67b6b1c109b41a683b98: Status 404 returned error can't find the container with id d327196e5f9dfcb09980cc3a644b88ff269de32c732e67b6b1c109b41a683b98 Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.821007 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerStarted","Data":"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa"} Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.827334 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.872520 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=31.371971486 podStartE2EDuration="35.872497683s" podCreationTimestamp="2025-11-26 22:45:39 +0000 UTC" firstStartedPulling="2025-11-26 22:46:09.534236631 +0000 UTC m=+1498.224471531" lastFinishedPulling="2025-11-26 22:46:14.034762818 +0000 UTC m=+1502.724997728" observedRunningTime="2025-11-26 22:46:14.850958196 +0000 UTC m=+1503.541193096" watchObservedRunningTime="2025-11-26 22:46:14.872497683 +0000 UTC m=+1503.562732593" Nov 26 22:46:14 crc kubenswrapper[4903]: I1126 22:46:14.967987 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.840752 4903 generic.go:334] "Generic (PLEG): container finished" 
podID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerID="02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6" exitCode=0 Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.840818 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerDied","Data":"02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6"} Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.841226 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerStarted","Data":"38c9881a1e414971837122377661bd65b5071746b64e74d09438cbc28197a71f"} Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.843795 4903 generic.go:334] "Generic (PLEG): container finished" podID="5e57af0e-bec3-4e7a-9644-52a660107434" containerID="b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8" exitCode=0 Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.843877 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerDied","Data":"b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8"} Nov 26 22:46:15 crc kubenswrapper[4903]: I1126 22:46:15.843930 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerStarted","Data":"d327196e5f9dfcb09980cc3a644b88ff269de32c732e67b6b1c109b41a683b98"} Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.392096 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:16 crc kubenswrapper[4903]: E1126 22:46:16.392686 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.392713 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.393049 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.394785 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.400418 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.541092 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.541345 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqs24\" (UniqueName: \"kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.541384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.623551 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.643818 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.644059 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.644087 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqs24\" (UniqueName: \"kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.644217 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.644429 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities\") pod \"redhat-marketplace-4qfmw\" (UID: 
\"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.665185 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqs24\" (UniqueName: \"kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24\") pod \"redhat-marketplace-4qfmw\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:16 crc kubenswrapper[4903]: I1126 22:46:16.736791 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.541628 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.946612 4903 generic.go:334] "Generic (PLEG): container finished" podID="6590af4f-61f2-496f-b13e-368db5637b6a" containerID="10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4" exitCode=0 Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.946738 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerDied","Data":"10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4"} Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.946767 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerStarted","Data":"b5495168d7be0521b494171b5fcb2c5f02a2fc155afabe4659611d90c88f6c34"} Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.956798 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerStarted","Data":"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908"} Nov 26 22:46:17 crc kubenswrapper[4903]: I1126 22:46:17.959777 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerStarted","Data":"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4"} Nov 26 22:46:18 crc kubenswrapper[4903]: I1126 22:46:18.389912 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 22:46:18 crc kubenswrapper[4903]: I1126 22:46:18.811189 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-x59hr" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.004420 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.004896 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.056622 4903 generic.go:334] "Generic (PLEG): container finished" podID="5e57af0e-bec3-4e7a-9644-52a660107434" containerID="eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4" exitCode=0 Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.056680 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerDied","Data":"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4"} Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.069156 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.069511 4903 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4" exitCode=137 Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.069644 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.069741 4903 scope.go:117] "RemoveContainer" containerID="9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.073941 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerStarted","Data":"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46"} Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.099718 4903 scope.go:117] "RemoveContainer" containerID="9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4" Nov 26 22:46:20 crc kubenswrapper[4903]: E1126 22:46:20.100208 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4\": container with ID starting with 9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4 not found: ID does not exist" containerID="9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.100246 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4"} err="failed to get container status \"9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4\": rpc error: code = NotFound desc = could not find container \"9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4\": container with ID starting with 9955553cedd8a77216e8c23eb0fc72868056b51a3deb242c30a0129a6a422bb4 not found: ID does not exist" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139453 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139526 4903 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139543 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139581 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139667 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.139944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140017 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140045 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140069 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140502 4903 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140520 4903 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140531 4903 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.140540 4903 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.149397 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 22:46:20 crc kubenswrapper[4903]: I1126 22:46:20.242963 4903 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.089754 4903 generic.go:334] "Generic (PLEG): container finished" podID="6590af4f-61f2-496f-b13e-368db5637b6a" containerID="07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46" exitCode=0 Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.090166 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerDied","Data":"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46"} Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.094472 4903 generic.go:334] "Generic (PLEG): container finished" podID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerID="c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908" exitCode=0 Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.094660 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerDied","Data":"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908"} Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.101710 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerStarted","Data":"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255"} Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.169331 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ljrkr" podStartSLOduration=3.432384059 podStartE2EDuration="8.169312029s" podCreationTimestamp="2025-11-26 22:46:13 
+0000 UTC" firstStartedPulling="2025-11-26 22:46:15.845186816 +0000 UTC m=+1504.535421726" lastFinishedPulling="2025-11-26 22:46:20.582114786 +0000 UTC m=+1509.272349696" observedRunningTime="2025-11-26 22:46:21.148339227 +0000 UTC m=+1509.838574137" watchObservedRunningTime="2025-11-26 22:46:21.169312029 +0000 UTC m=+1509.859546939" Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.179334 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-xssdp" Nov 26 22:46:21 crc kubenswrapper[4903]: I1126 22:46:21.495332 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 26 22:46:22 crc kubenswrapper[4903]: I1126 22:46:22.044495 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 22:46:22 crc kubenswrapper[4903]: I1126 22:46:22.114661 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerStarted","Data":"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8"} Nov 26 22:46:22 crc kubenswrapper[4903]: I1126 22:46:22.144911 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4qfmw" podStartSLOduration=2.593864882 podStartE2EDuration="6.144891988s" podCreationTimestamp="2025-11-26 22:46:16 +0000 UTC" firstStartedPulling="2025-11-26 22:46:17.949300252 +0000 UTC m=+1506.639535162" lastFinishedPulling="2025-11-26 22:46:21.500327368 +0000 UTC m=+1510.190562268" observedRunningTime="2025-11-26 22:46:22.138086366 +0000 UTC m=+1510.828321266" watchObservedRunningTime="2025-11-26 22:46:22.144891988 +0000 UTC m=+1510.835126898" Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.130562 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerStarted","Data":"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f"} Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.156356 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-29lbn" podStartSLOduration=4.091483028 podStartE2EDuration="10.156334218s" podCreationTimestamp="2025-11-26 22:46:13 +0000 UTC" firstStartedPulling="2025-11-26 22:46:15.842825122 +0000 UTC m=+1504.533060032" lastFinishedPulling="2025-11-26 22:46:21.907676322 +0000 UTC m=+1510.597911222" observedRunningTime="2025-11-26 22:46:23.1474338 +0000 UTC m=+1511.837668710" watchObservedRunningTime="2025-11-26 22:46:23.156334218 +0000 UTC m=+1511.846569128" Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.886125 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.886175 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.964810 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:23 crc kubenswrapper[4903]: I1126 22:46:23.965062 4903 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:24 crc kubenswrapper[4903]: I1126 22:46:24.939800 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-ljrkr" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="registry-server" probeResult="failure" output=< Nov 26 22:46:24 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 22:46:24 crc kubenswrapper[4903]: > Nov 26 22:46:25 crc kubenswrapper[4903]: I1126 22:46:25.023351 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-29lbn" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" probeResult="failure" output=< Nov 26 22:46:25 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 22:46:25 crc kubenswrapper[4903]: > Nov 26 22:46:25 crc kubenswrapper[4903]: I1126 22:46:25.376204 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 22:46:26 crc kubenswrapper[4903]: I1126 22:46:26.003223 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 22:46:26 crc kubenswrapper[4903]: I1126 22:46:26.737150 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:26 crc kubenswrapper[4903]: I1126 22:46:26.737357 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:26 crc kubenswrapper[4903]: I1126 22:46:26.788010 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:27 crc kubenswrapper[4903]: I1126 22:46:27.227750 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:29 crc kubenswrapper[4903]: I1126 22:46:29.267427 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:29 crc kubenswrapper[4903]: I1126 22:46:29.852220 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"] Nov 26 22:46:29 crc kubenswrapper[4903]: I1126 22:46:29.852446 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bxpxf" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="registry-server" containerID="cri-o://c59a336dd1d693b483d5ed67064dde4971aefe60414b1fb8ebef5380723e10f9" gracePeriod=2 Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.216756 4903 generic.go:334] "Generic (PLEG): container finished" podID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerID="c59a336dd1d693b483d5ed67064dde4971aefe60414b1fb8ebef5380723e10f9" exitCode=0 Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.216979 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerDied","Data":"c59a336dd1d693b483d5ed67064dde4971aefe60414b1fb8ebef5380723e10f9"} Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.217133 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4qfmw" 
podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="registry-server" containerID="cri-o://ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8" gracePeriod=2 Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.456411 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.626171 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content\") pod \"23d7d0fe-46b7-41b2-b568-56f18f564748\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.626345 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q8jn\" (UniqueName: \"kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn\") pod \"23d7d0fe-46b7-41b2-b568-56f18f564748\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.626533 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities\") pod \"23d7d0fe-46b7-41b2-b568-56f18f564748\" (UID: \"23d7d0fe-46b7-41b2-b568-56f18f564748\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.627786 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities" (OuterVolumeSpecName: "utilities") pod "23d7d0fe-46b7-41b2-b568-56f18f564748" (UID: "23d7d0fe-46b7-41b2-b568-56f18f564748"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.633166 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn" (OuterVolumeSpecName: "kube-api-access-7q8jn") pod "23d7d0fe-46b7-41b2-b568-56f18f564748" (UID: "23d7d0fe-46b7-41b2-b568-56f18f564748"). InnerVolumeSpecName "kube-api-access-7q8jn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.669994 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23d7d0fe-46b7-41b2-b568-56f18f564748" (UID: "23d7d0fe-46b7-41b2-b568-56f18f564748"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.734355 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.734386 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d7d0fe-46b7-41b2-b568-56f18f564748-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.734399 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q8jn\" (UniqueName: \"kubernetes.io/projected/23d7d0fe-46b7-41b2-b568-56f18f564748-kube-api-access-7q8jn\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.762890 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.938172 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities\") pod \"6590af4f-61f2-496f-b13e-368db5637b6a\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.938261 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content\") pod \"6590af4f-61f2-496f-b13e-368db5637b6a\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.938392 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqs24\" (UniqueName: \"kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24\") pod \"6590af4f-61f2-496f-b13e-368db5637b6a\" (UID: \"6590af4f-61f2-496f-b13e-368db5637b6a\") " Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.938810 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities" (OuterVolumeSpecName: "utilities") pod "6590af4f-61f2-496f-b13e-368db5637b6a" (UID: "6590af4f-61f2-496f-b13e-368db5637b6a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.938973 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.942212 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24" (OuterVolumeSpecName: "kube-api-access-xqs24") pod "6590af4f-61f2-496f-b13e-368db5637b6a" (UID: "6590af4f-61f2-496f-b13e-368db5637b6a"). InnerVolumeSpecName "kube-api-access-xqs24". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:30 crc kubenswrapper[4903]: I1126 22:46:30.965801 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6590af4f-61f2-496f-b13e-368db5637b6a" (UID: "6590af4f-61f2-496f-b13e-368db5637b6a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.041028 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqs24\" (UniqueName: \"kubernetes.io/projected/6590af4f-61f2-496f-b13e-368db5637b6a-kube-api-access-xqs24\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.041058 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6590af4f-61f2-496f-b13e-368db5637b6a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.228962 4903 generic.go:334] "Generic (PLEG): container finished" podID="6590af4f-61f2-496f-b13e-368db5637b6a" containerID="ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8" exitCode=0 Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.229020 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4qfmw" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.229005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerDied","Data":"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8"} Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.229386 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4qfmw" event={"ID":"6590af4f-61f2-496f-b13e-368db5637b6a","Type":"ContainerDied","Data":"b5495168d7be0521b494171b5fcb2c5f02a2fc155afabe4659611d90c88f6c34"} Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.229406 4903 scope.go:117] "RemoveContainer" containerID="ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.233857 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bxpxf" event={"ID":"23d7d0fe-46b7-41b2-b568-56f18f564748","Type":"ContainerDied","Data":"bfe64ce7a419cacd55bd20288a4464a6d51ae4095f8770980580b0a4a9e65d89"} Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.233907 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bxpxf" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.249205 4903 scope.go:117] "RemoveContainer" containerID="07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.283580 4903 scope.go:117] "RemoveContainer" containerID="10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.286867 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"] Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.313745 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bxpxf"] Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.336738 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.360885 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4qfmw"] Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.376833 4903 scope.go:117] "RemoveContainer" containerID="ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8" Nov 26 22:46:31 crc kubenswrapper[4903]: E1126 22:46:31.381468 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8\": container with ID starting with ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8 not found: ID does not exist" containerID="ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.381504 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8"} err="failed to get container status \"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8\": rpc error: code = NotFound desc = could not find container \"ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8\": container with ID starting with ad639b64c83455d65c7f43e3f79748ac1f31bd7fa8750abe5d57466157fea8b8 not found: ID does not exist" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.381529 4903 scope.go:117] "RemoveContainer" containerID="07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46" Nov 26 22:46:31 crc kubenswrapper[4903]: E1126 22:46:31.387157 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46\": container with ID starting with 07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46 not found: ID does not exist" containerID="07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.387185 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46"} err="failed to get container status \"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46\": rpc error: code = NotFound desc = could not find container \"07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46\": container with ID starting with 
07366f7fced382d2c633327e5e4f37121e8575ecbfcdddb60ddbddc0e7aeab46 not found: ID does not exist" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.387204 4903 scope.go:117] "RemoveContainer" containerID="10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4" Nov 26 22:46:31 crc kubenswrapper[4903]: E1126 22:46:31.388838 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4\": container with ID starting with 10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4 not found: ID does not exist" containerID="10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.388864 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4"} err="failed to get container status \"10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4\": rpc error: code = NotFound desc = could not find container \"10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4\": container with ID starting with 10c8ad63546a698d3d515d7049311e1730b01cd6149d42c919873dfe12b6bfa4 not found: ID does not exist" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.388877 4903 scope.go:117] "RemoveContainer" containerID="c59a336dd1d693b483d5ed67064dde4971aefe60414b1fb8ebef5380723e10f9" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.417100 4903 scope.go:117] "RemoveContainer" containerID="6173fbea37d148de543b8886e044de341f50ca684a84a6ee279b65425039b006" Nov 26 22:46:31 crc kubenswrapper[4903]: I1126 22:46:31.440201 4903 scope.go:117] "RemoveContainer" containerID="98873735f433a17dae345725586347dd37ad1acefe9ea956f5660e6dc8b718b2" Nov 26 22:46:32 crc kubenswrapper[4903]: I1126 22:46:32.040030 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" path="/var/lib/kubelet/pods/23d7d0fe-46b7-41b2-b568-56f18f564748/volumes" Nov 26 22:46:32 crc kubenswrapper[4903]: I1126 22:46:32.041026 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" path="/var/lib/kubelet/pods/6590af4f-61f2-496f-b13e-368db5637b6a/volumes" Nov 26 22:46:33 crc kubenswrapper[4903]: I1126 22:46:33.950240 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:34 crc kubenswrapper[4903]: I1126 22:46:34.015500 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:34 crc kubenswrapper[4903]: I1126 22:46:34.454733 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:35 crc kubenswrapper[4903]: I1126 22:46:35.027320 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-29lbn" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" probeResult="failure" output=< Nov 26 22:46:35 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 22:46:35 crc kubenswrapper[4903]: > Nov 26 22:46:35 crc kubenswrapper[4903]: I1126 22:46:35.286446 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ljrkr" 
podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="registry-server" containerID="cri-o://16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255" gracePeriod=2 Nov 26 22:46:35 crc kubenswrapper[4903]: E1126 22:46:35.438434 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e57af0e_bec3_4e7a_9644_52a660107434.slice/crio-conmon-16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255.scope\": RecentStats: unable to find data in memory cache]" Nov 26 22:46:35 crc kubenswrapper[4903]: I1126 22:46:35.904552 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.087306 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzpxk\" (UniqueName: \"kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk\") pod \"5e57af0e-bec3-4e7a-9644-52a660107434\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.087683 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities\") pod \"5e57af0e-bec3-4e7a-9644-52a660107434\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.088049 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content\") pod \"5e57af0e-bec3-4e7a-9644-52a660107434\" (UID: \"5e57af0e-bec3-4e7a-9644-52a660107434\") " Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.088438 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities" (OuterVolumeSpecName: "utilities") pod "5e57af0e-bec3-4e7a-9644-52a660107434" (UID: "5e57af0e-bec3-4e7a-9644-52a660107434"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.089507 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.095042 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk" (OuterVolumeSpecName: "kube-api-access-vzpxk") pod "5e57af0e-bec3-4e7a-9644-52a660107434" (UID: "5e57af0e-bec3-4e7a-9644-52a660107434"). InnerVolumeSpecName "kube-api-access-vzpxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.131435 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e57af0e-bec3-4e7a-9644-52a660107434" (UID: "5e57af0e-bec3-4e7a-9644-52a660107434"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.190528 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e57af0e-bec3-4e7a-9644-52a660107434-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.190574 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzpxk\" (UniqueName: \"kubernetes.io/projected/5e57af0e-bec3-4e7a-9644-52a660107434-kube-api-access-vzpxk\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.306302 4903 generic.go:334] "Generic (PLEG): container finished" podID="5e57af0e-bec3-4e7a-9644-52a660107434" containerID="16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255" exitCode=0 Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.306355 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ljrkr" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.306378 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerDied","Data":"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255"} Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.306762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ljrkr" event={"ID":"5e57af0e-bec3-4e7a-9644-52a660107434","Type":"ContainerDied","Data":"d327196e5f9dfcb09980cc3a644b88ff269de32c732e67b6b1c109b41a683b98"} Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.306782 4903 scope.go:117] "RemoveContainer" containerID="16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.342418 4903 scope.go:117] "RemoveContainer" containerID="eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.352957 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.363891 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ljrkr"] Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.389735 4903 scope.go:117] "RemoveContainer" containerID="b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.434847 4903 scope.go:117] "RemoveContainer" containerID="16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255" Nov 26 22:46:36 crc kubenswrapper[4903]: E1126 22:46:36.435352 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255\": container with ID starting with 16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255 not found: ID does not exist" containerID="16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.435397 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255"} err="failed to get container status 
\"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255\": rpc error: code = NotFound desc = could not find container \"16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255\": container with ID starting with 16cb83d6e3fcadd03a3c99e83c55d8e74bf01a902da25d470bf856279bc5f255 not found: ID does not exist" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.435423 4903 scope.go:117] "RemoveContainer" containerID="eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4" Nov 26 22:46:36 crc kubenswrapper[4903]: E1126 22:46:36.435781 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4\": container with ID starting with eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4 not found: ID does not exist" containerID="eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.435820 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4"} err="failed to get container status \"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4\": rpc error: code = NotFound desc = could not find container \"eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4\": container with ID starting with eb173b0b4a5cd223ca3257b52e912dbbb9d27bbdc0757840682d629e8caa6ad4 not found: ID does not exist" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.435849 4903 scope.go:117] "RemoveContainer" containerID="b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8" Nov 26 22:46:36 crc kubenswrapper[4903]: E1126 22:46:36.436126 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8\": container with ID starting with b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8 not found: ID does not exist" containerID="b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8" Nov 26 22:46:36 crc kubenswrapper[4903]: I1126 22:46:36.436151 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8"} err="failed to get container status \"b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8\": rpc error: code = NotFound desc = could not find container \"b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8\": container with ID starting with b1967cb25036e804c7f3394eb30e15d3f546a1546f26fb0aa1fc2ce5dc2920d8 not found: ID does not exist" Nov 26 22:46:38 crc kubenswrapper[4903]: I1126 22:46:38.042133 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" path="/var/lib/kubelet/pods/5e57af0e-bec3-4e7a-9644-52a660107434/volumes" Nov 26 22:46:38 crc kubenswrapper[4903]: I1126 22:46:38.850330 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.015753 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.071788 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.228315 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.228627 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" containerID="cri-o://35037d56510b3726b4dea82db297200bcd800204b1720350b319b85a81adcfd5" gracePeriod=30 Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.289705 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.289913 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" containerName="mysqld-exporter" containerID="cri-o://70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477" gracePeriod=30 Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.402972 4903 generic.go:334] "Generic (PLEG): container finished" podID="f58d4082-e69c-44e2-9961-9842cb738869" containerID="35037d56510b3726b4dea82db297200bcd800204b1720350b319b85a81adcfd5" exitCode=2 Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.403059 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerDied","Data":"35037d56510b3726b4dea82db297200bcd800204b1720350b319b85a81adcfd5"} Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.403589 4903 scope.go:117] "RemoveContainer" containerID="c2450cce3468d69a60f2d74e34daec8cfa728e35f26892f2237db68674914921" Nov 26 22:46:44 crc kubenswrapper[4903]: I1126 22:46:44.625593 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.074021 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.079396 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.234117 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data\") pod \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.234292 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k26s4\" (UniqueName: \"kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4\") pod \"f58d4082-e69c-44e2-9961-9842cb738869\" (UID: \"f58d4082-e69c-44e2-9961-9842cb738869\") " Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.234318 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle\") pod \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.234551 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2kzp\" (UniqueName: \"kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp\") pod \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\" (UID: \"ca375e39-4f68-4f91-be27-8b4975a0ea3c\") " Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.251269 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4" (OuterVolumeSpecName: "kube-api-access-k26s4") pod "f58d4082-e69c-44e2-9961-9842cb738869" (UID: "f58d4082-e69c-44e2-9961-9842cb738869"). InnerVolumeSpecName "kube-api-access-k26s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.251459 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp" (OuterVolumeSpecName: "kube-api-access-s2kzp") pod "ca375e39-4f68-4f91-be27-8b4975a0ea3c" (UID: "ca375e39-4f68-4f91-be27-8b4975a0ea3c"). InnerVolumeSpecName "kube-api-access-s2kzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.286937 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca375e39-4f68-4f91-be27-8b4975a0ea3c" (UID: "ca375e39-4f68-4f91-be27-8b4975a0ea3c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.317499 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data" (OuterVolumeSpecName: "config-data") pod "ca375e39-4f68-4f91-be27-8b4975a0ea3c" (UID: "ca375e39-4f68-4f91-be27-8b4975a0ea3c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.337561 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2kzp\" (UniqueName: \"kubernetes.io/projected/ca375e39-4f68-4f91-be27-8b4975a0ea3c-kube-api-access-s2kzp\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.337596 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.337610 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k26s4\" (UniqueName: \"kubernetes.io/projected/f58d4082-e69c-44e2-9961-9842cb738869-kube-api-access-k26s4\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.337619 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca375e39-4f68-4f91-be27-8b4975a0ea3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.413906 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f58d4082-e69c-44e2-9961-9842cb738869","Type":"ContainerDied","Data":"811f0347a01b5853f4648e6ac01c00b8ed69289d5baa4432298544c62f314fd7"} Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.413933 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.413957 4903 scope.go:117] "RemoveContainer" containerID="35037d56510b3726b4dea82db297200bcd800204b1720350b319b85a81adcfd5" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.416443 4903 generic.go:334] "Generic (PLEG): container finished" podID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" containerID="70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477" exitCode=2 Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.416636 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-29lbn" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" containerID="cri-o://c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f" gracePeriod=2 Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.416923 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.417013 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ca375e39-4f68-4f91-be27-8b4975a0ea3c","Type":"ContainerDied","Data":"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477"} Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.417063 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"ca375e39-4f68-4f91-be27-8b4975a0ea3c","Type":"ContainerDied","Data":"3f7ea076d91bad3d6b64e9e90cb7a7f566df209950672b78483ed74b7445ab83"} Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.443681 4903 scope.go:117] "RemoveContainer" containerID="70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.468795 4903 scope.go:117] "RemoveContainer" containerID="70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.469490 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477\": container with ID starting with 70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477 not found: ID does not exist" containerID="70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.469536 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477"} err="failed to get container status \"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477\": rpc error: code = NotFound desc = could not find container \"70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477\": container with ID starting with 70537a6667b1295ad201c8f866f8eb27dcf8a19eb53f2e37be576ec8f18ee477 not found: ID does not exist" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.473309 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.491770 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.518759 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565311 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.565877 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565894 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.565921 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565928 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 
22:46:45.565937 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565943 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.565957 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565963 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.565983 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="extract-content" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.565989 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="extract-content" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566006 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566011 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566025 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566031 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566043 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566049 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566065 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="extract-content" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566071 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="extract-content" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566080 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566088 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566101 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="extract-content" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566107 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="extract-content" Nov 26 
22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566123 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566128 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566143 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566150 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="extract-utilities" Nov 26 22:46:45 crc kubenswrapper[4903]: E1126 22:46:45.566168 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" containerName="mysqld-exporter" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566174 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" containerName="mysqld-exporter" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566374 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566385 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d7d0fe-46b7-41b2-b568-56f18f564748" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566393 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566410 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e57af0e-bec3-4e7a-9644-52a660107434" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566430 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6590af4f-61f2-496f-b13e-368db5637b6a" containerName="registry-server" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.566437 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" containerName="mysqld-exporter" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.567252 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.571949 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.572008 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.595299 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.624524 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.641146 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.642002 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.642024 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f58d4082-e69c-44e2-9961-9842cb738869" containerName="kube-state-metrics" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.648226 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.652039 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.652412 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.652542 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758423 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758735 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758762 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758822 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " 
pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758879 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flq5j\" (UniqueName: \"kubernetes.io/projected/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-kube-api-access-flq5j\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758897 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758930 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-config-data\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.758965 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rdzw\" (UniqueName: \"kubernetes.io/projected/0f92d7db-9155-4bdc-8285-29091382434c-kube-api-access-4rdzw\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861171 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-config-data\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861245 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rdzw\" (UniqueName: \"kubernetes.io/projected/0f92d7db-9155-4bdc-8285-29091382434c-kube-api-access-4rdzw\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861322 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861358 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861380 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc 
kubenswrapper[4903]: I1126 22:46:45.861432 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861485 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flq5j\" (UniqueName: \"kubernetes.io/projected/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-kube-api-access-flq5j\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.861501 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.883334 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rdzw\" (UniqueName: \"kubernetes.io/projected/0f92d7db-9155-4bdc-8285-29091382434c-kube-api-access-4rdzw\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.884777 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.885266 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.886602 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-config-data\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.887167 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flq5j\" (UniqueName: \"kubernetes.io/projected/a0677cff-9cf4-4eba-bb4b-4fea82d38f71-kube-api-access-flq5j\") pod \"mysqld-exporter-0\" (UID: \"a0677cff-9cf4-4eba-bb4b-4fea82d38f71\") " pod="openstack/mysqld-exporter-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.887499 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.888433 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.889186 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f92d7db-9155-4bdc-8285-29091382434c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0f92d7db-9155-4bdc-8285-29091382434c\") " pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.902123 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 22:46:45 crc kubenswrapper[4903]: I1126 22:46:45.965129 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.055972 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca375e39-4f68-4f91-be27-8b4975a0ea3c" path="/var/lib/kubelet/pods/ca375e39-4f68-4f91-be27-8b4975a0ea3c/volumes" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.057235 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f58d4082-e69c-44e2-9961-9842cb738869" path="/var/lib/kubelet/pods/f58d4082-e69c-44e2-9961-9842cb738869/volumes" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.062533 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.172625 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfscv\" (UniqueName: \"kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv\") pod \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.172872 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities\") pod \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.173056 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content\") pod \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\" (UID: \"0b4ba05d-5933-445a-aa3a-6c7766b73ebc\") " Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.177414 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities" (OuterVolumeSpecName: "utilities") pod "0b4ba05d-5933-445a-aa3a-6c7766b73ebc" (UID: "0b4ba05d-5933-445a-aa3a-6c7766b73ebc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.200720 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv" (OuterVolumeSpecName: "kube-api-access-nfscv") pod "0b4ba05d-5933-445a-aa3a-6c7766b73ebc" (UID: "0b4ba05d-5933-445a-aa3a-6c7766b73ebc"). InnerVolumeSpecName "kube-api-access-nfscv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.243947 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b4ba05d-5933-445a-aa3a-6c7766b73ebc" (UID: "0b4ba05d-5933-445a-aa3a-6c7766b73ebc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.275599 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.275631 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfscv\" (UniqueName: \"kubernetes.io/projected/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-kube-api-access-nfscv\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.275643 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b4ba05d-5933-445a-aa3a-6c7766b73ebc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.431050 4903 generic.go:334] "Generic (PLEG): container finished" podID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerID="c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f" exitCode=0 Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.431123 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29lbn" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.431133 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerDied","Data":"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f"} Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.431159 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29lbn" event={"ID":"0b4ba05d-5933-445a-aa3a-6c7766b73ebc","Type":"ContainerDied","Data":"38c9881a1e414971837122377661bd65b5071746b64e74d09438cbc28197a71f"} Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.431190 4903 scope.go:117] "RemoveContainer" containerID="c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.472940 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.477756 4903 scope.go:117] "RemoveContainer" containerID="c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.492472 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.492759 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.508249 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-29lbn"] Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.601861 4903 scope.go:117] "RemoveContainer" 
containerID="02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.640888 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.674928 4903 scope.go:117] "RemoveContainer" containerID="c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f" Nov 26 22:46:46 crc kubenswrapper[4903]: E1126 22:46:46.683885 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f\": container with ID starting with c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f not found: ID does not exist" containerID="c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.683925 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f"} err="failed to get container status \"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f\": rpc error: code = NotFound desc = could not find container \"c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f\": container with ID starting with c64cfa475d004d95ce024f921923a3388a30f6f73eeb71768cf316463e5cce1f not found: ID does not exist" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.683950 4903 scope.go:117] "RemoveContainer" containerID="c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908" Nov 26 22:46:46 crc kubenswrapper[4903]: E1126 22:46:46.696606 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908\": container with ID starting with c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908 not found: ID does not exist" containerID="c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.696650 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908"} err="failed to get container status \"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908\": rpc error: code = NotFound desc = could not find container \"c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908\": container with ID starting with c611cda39ed17323c5f29dd391cd105464f60ccaf85dadf1a5aad875d2dcd908 not found: ID does not exist" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.696682 4903 scope.go:117] "RemoveContainer" containerID="02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6" Nov 26 22:46:46 crc kubenswrapper[4903]: E1126 22:46:46.702813 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6\": container with ID starting with 02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6 not found: ID does not exist" containerID="02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6" Nov 26 22:46:46 crc kubenswrapper[4903]: I1126 22:46:46.702845 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6"} err="failed to get container status \"02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6\": rpc error: code = NotFound desc = could not find container \"02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6\": container with ID starting with 02b8a834f9b7176cf6444e1b2ba635103fafcc025b9e2940a23642451199c9a6 not found: ID does not exist" Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.085793 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.086047 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-central-agent" containerID="cri-o://9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72" gracePeriod=30 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.086115 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="proxy-httpd" containerID="cri-o://b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa" gracePeriod=30 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.086138 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="sg-core" containerID="cri-o://2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195" gracePeriod=30 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.086184 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-notification-agent" containerID="cri-o://ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620" gracePeriod=30 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.466673 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a0677cff-9cf4-4eba-bb4b-4fea82d38f71","Type":"ContainerStarted","Data":"1109753a5e3441fe2fba80fdd2afc1e350bde6741643eb5e6daba7d34dffd0e8"} Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.470504 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0f92d7db-9155-4bdc-8285-29091382434c","Type":"ContainerStarted","Data":"c9370870c217142ffa79bdd5101b6073bd34d63af74ab6d7e2770c5c8c45d54f"} Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.470545 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0f92d7db-9155-4bdc-8285-29091382434c","Type":"ContainerStarted","Data":"78b7d21fe26a62a27f63ef890c7bc4ead2909d6f4b77bae506b9ba11d7a981b6"} Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.471658 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.478178 4903 generic.go:334] "Generic (PLEG): container finished" podID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerID="b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa" exitCode=0 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.478204 4903 generic.go:334] "Generic (PLEG): container finished" podID="d127bade-9b0b-4d82-bfa2-656c4986fb18" 
containerID="2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195" exitCode=2 Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.478223 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerDied","Data":"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa"} Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.478243 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerDied","Data":"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195"} Nov 26 22:46:47 crc kubenswrapper[4903]: I1126 22:46:47.497468 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.112984328 podStartE2EDuration="2.497446379s" podCreationTimestamp="2025-11-26 22:46:45 +0000 UTC" firstStartedPulling="2025-11-26 22:46:46.492536654 +0000 UTC m=+1535.182771554" lastFinishedPulling="2025-11-26 22:46:46.876998695 +0000 UTC m=+1535.567233605" observedRunningTime="2025-11-26 22:46:47.490426411 +0000 UTC m=+1536.180661311" watchObservedRunningTime="2025-11-26 22:46:47.497446379 +0000 UTC m=+1536.187681289" Nov 26 22:46:48 crc kubenswrapper[4903]: I1126 22:46:48.043444 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" path="/var/lib/kubelet/pods/0b4ba05d-5933-445a-aa3a-6c7766b73ebc/volumes" Nov 26 22:46:48 crc kubenswrapper[4903]: I1126 22:46:48.509717 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"a0677cff-9cf4-4eba-bb4b-4fea82d38f71","Type":"ContainerStarted","Data":"2a953673333707970c323ec629b893f1b49d6e79cd7a178d9064232fd74defda"} Nov 26 22:46:48 crc kubenswrapper[4903]: I1126 22:46:48.533411 4903 generic.go:334] "Generic (PLEG): container finished" podID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerID="9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72" exitCode=0 Nov 26 22:46:48 crc kubenswrapper[4903]: I1126 22:46:48.534726 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerDied","Data":"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72"} Nov 26 22:46:48 crc kubenswrapper[4903]: I1126 22:46:48.552578 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.7810322579999998 podStartE2EDuration="3.55256415s" podCreationTimestamp="2025-11-26 22:46:45 +0000 UTC" firstStartedPulling="2025-11-26 22:46:46.703834055 +0000 UTC m=+1535.394068965" lastFinishedPulling="2025-11-26 22:46:47.475365957 +0000 UTC m=+1536.165600857" observedRunningTime="2025-11-26 22:46:48.529965024 +0000 UTC m=+1537.220199934" watchObservedRunningTime="2025-11-26 22:46:48.55256415 +0000 UTC m=+1537.242799060" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.179799 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293348 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293422 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293482 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293509 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djqvp\" (UniqueName: \"kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293547 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293584 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293619 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd\") pod \"d127bade-9b0b-4d82-bfa2-656c4986fb18\" (UID: \"d127bade-9b0b-4d82-bfa2-656c4986fb18\") " Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.293922 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.294189 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.294392 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.316022 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp" (OuterVolumeSpecName: "kube-api-access-djqvp") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). InnerVolumeSpecName "kube-api-access-djqvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.317956 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts" (OuterVolumeSpecName: "scripts") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.341708 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.384015 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.395715 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.395746 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.395758 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djqvp\" (UniqueName: \"kubernetes.io/projected/d127bade-9b0b-4d82-bfa2-656c4986fb18-kube-api-access-djqvp\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.395768 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.395776 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d127bade-9b0b-4d82-bfa2-656c4986fb18-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.421177 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data" (OuterVolumeSpecName: "config-data") pod "d127bade-9b0b-4d82-bfa2-656c4986fb18" (UID: "d127bade-9b0b-4d82-bfa2-656c4986fb18"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.498180 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d127bade-9b0b-4d82-bfa2-656c4986fb18-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.567539 4903 generic.go:334] "Generic (PLEG): container finished" podID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerID="ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620" exitCode=0 Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.567651 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.567647 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerDied","Data":"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620"} Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.567899 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d127bade-9b0b-4d82-bfa2-656c4986fb18","Type":"ContainerDied","Data":"5cd91f1c22345cac742578da141c236d57ede672aadfca5dff2dabf3f0f5afc9"} Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.567930 4903 scope.go:117] "RemoveContainer" containerID="b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.608264 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.614658 4903 scope.go:117] "RemoveContainer" containerID="2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.628069 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.644320 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645071 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="extract-utilities" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645101 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="extract-utilities" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645131 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-notification-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645141 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-notification-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645180 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="extract-content" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645189 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="extract-content" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645215 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="sg-core" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645223 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="sg-core" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645237 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645245 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645273 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="proxy-httpd" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645283 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="proxy-httpd" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.645299 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-central-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645307 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-central-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645585 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="proxy-httpd" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645625 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-central-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645650 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="sg-core" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645663 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b4ba05d-5933-445a-aa3a-6c7766b73ebc" containerName="registry-server" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.645678 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" containerName="ceilometer-notification-agent" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.648468 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.655347 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.656308 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.656651 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.656850 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.685750 4903 scope.go:117] "RemoveContainer" containerID="ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.717318 4903 scope.go:117] "RemoveContainer" containerID="9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.740413 4903 scope.go:117] "RemoveContainer" containerID="b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.740863 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa\": container with ID starting with b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa not found: ID does not exist" containerID="b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.740899 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa"} err="failed to get container status \"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa\": rpc error: code = NotFound desc = could not find container \"b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa\": container with ID starting with b90763be8dc1a5dec32c3559f4c9233145c6c3339820d2dbd95db568a03f79fa not found: ID does not exist" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.740928 4903 scope.go:117] "RemoveContainer" containerID="2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.741267 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195\": container with ID starting with 2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195 not found: ID does not exist" containerID="2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.741290 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195"} err="failed to get container status \"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195\": rpc error: code = NotFound desc = could not find container \"2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195\": container with ID starting with 2e561a48010bb1b1c1e5906941a26e196895b38af9c2e7719a8cb2c80ba55195 not found: ID does not exist" Nov 26 22:46:51 
crc kubenswrapper[4903]: I1126 22:46:51.741302 4903 scope.go:117] "RemoveContainer" containerID="ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.741563 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620\": container with ID starting with ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620 not found: ID does not exist" containerID="ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.741604 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620"} err="failed to get container status \"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620\": rpc error: code = NotFound desc = could not find container \"ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620\": container with ID starting with ed24b4025e554b205a1321eedb974bd4833df91dc4f099875f6089477db8e620 not found: ID does not exist" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.741635 4903 scope.go:117] "RemoveContainer" containerID="9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72" Nov 26 22:46:51 crc kubenswrapper[4903]: E1126 22:46:51.742094 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72\": container with ID starting with 9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72 not found: ID does not exist" containerID="9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.742130 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72"} err="failed to get container status \"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72\": rpc error: code = NotFound desc = could not find container \"9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72\": container with ID starting with 9f5decf06477c1a1398359f59a8d8eaf18c91645d99a60a157eb9ab186174c72 not found: ID does not exist" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804254 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804569 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804643 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgn24\" (UniqueName: \"kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") 
" pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804755 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804929 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.804978 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.805083 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.805486 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907489 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907567 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907627 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907653 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgn24\" (UniqueName: \"kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907723 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907738 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.907769 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.908324 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.908335 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.913269 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.913490 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.913635 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.919309 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.919611 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle\") pod 
\"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:51 crc kubenswrapper[4903]: I1126 22:46:51.925479 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgn24\" (UniqueName: \"kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24\") pod \"ceilometer-0\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " pod="openstack/ceilometer-0" Nov 26 22:46:52 crc kubenswrapper[4903]: I1126 22:46:52.005202 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:46:52 crc kubenswrapper[4903]: I1126 22:46:52.059450 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d127bade-9b0b-4d82-bfa2-656c4986fb18" path="/var/lib/kubelet/pods/d127bade-9b0b-4d82-bfa2-656c4986fb18/volumes" Nov 26 22:46:52 crc kubenswrapper[4903]: I1126 22:46:52.616820 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:46:53 crc kubenswrapper[4903]: I1126 22:46:53.624158 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerStarted","Data":"bce5d81f816732ced5807ea71f52fb504a1d6a02c385cbe45b47b98f25627c63"} Nov 26 22:46:53 crc kubenswrapper[4903]: I1126 22:46:53.625662 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerStarted","Data":"d8b5c146b08152e7175e1c505c183f7ca5d4ba47182921afb2f46096908e18c0"} Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.399498 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-dz7dx"] Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.415276 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-dz7dx"] Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.489781 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-wsb64"] Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.491537 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.504630 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-wsb64"] Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.655028 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerStarted","Data":"0025e67dd90023052b5b9e43f33b81a74bf93853296b018ba93a29098f850793"} Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.688160 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh2gw\" (UniqueName: \"kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.688477 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.688653 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.791090 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh2gw\" (UniqueName: \"kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.791186 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.791239 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.795018 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.796381 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc 
kubenswrapper[4903]: I1126 22:46:54.806065 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh2gw\" (UniqueName: \"kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw\") pod \"heat-db-sync-wsb64\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:54 crc kubenswrapper[4903]: I1126 22:46:54.820573 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-wsb64" Nov 26 22:46:55 crc kubenswrapper[4903]: I1126 22:46:55.406972 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-wsb64"] Nov 26 22:46:55 crc kubenswrapper[4903]: I1126 22:46:55.680635 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerStarted","Data":"50dcabb3407d3bcee106bb0eed6605e23e61267ade39fe13d76f29c6f6dac686"} Nov 26 22:46:55 crc kubenswrapper[4903]: I1126 22:46:55.694527 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-wsb64" event={"ID":"4049fe04-7d20-41b8-b38c-9c0b39144fda","Type":"ContainerStarted","Data":"7fd5192cd82be51f1745d5b1e29cd0bb44101af05715b4affc9d0efce85970f6"} Nov 26 22:46:55 crc kubenswrapper[4903]: I1126 22:46:55.929496 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 22:46:56 crc kubenswrapper[4903]: I1126 22:46:56.042603 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a96189f-52eb-44aa-8638-96d516cd0eb3" path="/var/lib/kubelet/pods/8a96189f-52eb-44aa-8638-96d516cd0eb3/volumes" Nov 26 22:46:56 crc kubenswrapper[4903]: I1126 22:46:56.670329 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:46:56 crc kubenswrapper[4903]: I1126 22:46:56.727455 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerStarted","Data":"2410bc318ada5bdfcc585dc9360eea0422c59faa0b56e639ed650748e2ff2daf"} Nov 26 22:46:56 crc kubenswrapper[4903]: I1126 22:46:56.728648 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 22:46:56 crc kubenswrapper[4903]: I1126 22:46:56.758663 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.153526867 podStartE2EDuration="5.758644202s" podCreationTimestamp="2025-11-26 22:46:51 +0000 UTC" firstStartedPulling="2025-11-26 22:46:52.60576678 +0000 UTC m=+1541.296001680" lastFinishedPulling="2025-11-26 22:46:56.210884105 +0000 UTC m=+1544.901119015" observedRunningTime="2025-11-26 22:46:56.750103553 +0000 UTC m=+1545.440338473" watchObservedRunningTime="2025-11-26 22:46:56.758644202 +0000 UTC m=+1545.448879112" Nov 26 22:46:57 crc kubenswrapper[4903]: I1126 22:46:57.666352 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.007207 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.007782 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-central-agent" 
containerID="cri-o://bce5d81f816732ced5807ea71f52fb504a1d6a02c385cbe45b47b98f25627c63" gracePeriod=30 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.007901 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="proxy-httpd" containerID="cri-o://2410bc318ada5bdfcc585dc9360eea0422c59faa0b56e639ed650748e2ff2daf" gracePeriod=30 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.007933 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="sg-core" containerID="cri-o://50dcabb3407d3bcee106bb0eed6605e23e61267ade39fe13d76f29c6f6dac686" gracePeriod=30 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.007961 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-notification-agent" containerID="cri-o://0025e67dd90023052b5b9e43f33b81a74bf93853296b018ba93a29098f850793" gracePeriod=30 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.783746 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6b85164-43b1-4607-8690-797692f5c02f" containerID="2410bc318ada5bdfcc585dc9360eea0422c59faa0b56e639ed650748e2ff2daf" exitCode=0 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.784178 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6b85164-43b1-4607-8690-797692f5c02f" containerID="50dcabb3407d3bcee106bb0eed6605e23e61267ade39fe13d76f29c6f6dac686" exitCode=2 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.784265 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6b85164-43b1-4607-8690-797692f5c02f" containerID="0025e67dd90023052b5b9e43f33b81a74bf93853296b018ba93a29098f850793" exitCode=0 Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.783825 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerDied","Data":"2410bc318ada5bdfcc585dc9360eea0422c59faa0b56e639ed650748e2ff2daf"} Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.784412 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerDied","Data":"50dcabb3407d3bcee106bb0eed6605e23e61267ade39fe13d76f29c6f6dac686"} Nov 26 22:47:00 crc kubenswrapper[4903]: I1126 22:47:00.784470 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerDied","Data":"0025e67dd90023052b5b9e43f33b81a74bf93853296b018ba93a29098f850793"} Nov 26 22:47:01 crc kubenswrapper[4903]: I1126 22:47:01.217239 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="rabbitmq" containerID="cri-o://2d3df56702b17befc1893f007a5b55c5842827f405f47c1858ec09e3ca3f571a" gracePeriod=604796 Nov 26 22:47:02 crc kubenswrapper[4903]: I1126 22:47:02.316965 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="rabbitmq" containerID="cri-o://0584530df49ff20fab14cd674291135a2d591f6fd7ce117d13a3ca007cce8f5d" gracePeriod=604796 Nov 26 22:47:06 crc kubenswrapper[4903]: I1126 
22:47:06.499271 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.129:5671: connect: connection refused" Nov 26 22:47:06 crc kubenswrapper[4903]: I1126 22:47:06.902025 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Nov 26 22:47:07 crc kubenswrapper[4903]: I1126 22:47:07.935039 4903 generic.go:334] "Generic (PLEG): container finished" podID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerID="2d3df56702b17befc1893f007a5b55c5842827f405f47c1858ec09e3ca3f571a" exitCode=0 Nov 26 22:47:07 crc kubenswrapper[4903]: I1126 22:47:07.935179 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a97b1b29-2461-47c7-a3f9-71837fe03413","Type":"ContainerDied","Data":"2d3df56702b17befc1893f007a5b55c5842827f405f47c1858ec09e3ca3f571a"} Nov 26 22:47:08 crc kubenswrapper[4903]: I1126 22:47:08.953182 4903 generic.go:334] "Generic (PLEG): container finished" podID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerID="0584530df49ff20fab14cd674291135a2d591f6fd7ce117d13a3ca007cce8f5d" exitCode=0 Nov 26 22:47:08 crc kubenswrapper[4903]: I1126 22:47:08.953296 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"910b9022-54fc-4f7d-b69b-bdb7661cb91d","Type":"ContainerDied","Data":"0584530df49ff20fab14cd674291135a2d591f6fd7ce117d13a3ca007cce8f5d"} Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.019395 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.023661 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.026000 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.108588 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.172711 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.172818 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.173458 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.173542 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.173831 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.174197 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.174298 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp79p\" (UniqueName: \"kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.276537 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" 
(UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.277580 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.277868 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.277923 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.277955 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.277976 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.278001 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.278037 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp79p\" (UniqueName: \"kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.278769 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.279065 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " 
pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.279077 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.279210 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.279412 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.303212 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp79p\" (UniqueName: \"kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p\") pod \"dnsmasq-dns-7d84b4d45c-m9mfn\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:11 crc kubenswrapper[4903]: I1126 22:47:11.394408 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:15 crc kubenswrapper[4903]: E1126 22:47:15.325060 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 26 22:47:15 crc kubenswrapper[4903]: E1126 22:47:15.325498 4903 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 26 22:47:15 crc kubenswrapper[4903]: E1126 22:47:15.325621 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fh2gw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-wsb64_openstack(4049fe04-7d20-41b8-b38c-9c0b39144fda): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 22:47:15 crc kubenswrapper[4903]: E1126 22:47:15.326761 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-wsb64" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.465442 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.473454 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.609769 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fg9r\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610018 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thc2d\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610040 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610060 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610148 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610205 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610239 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610264 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610290 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610318 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: 
\"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610342 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610364 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610395 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610413 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610435 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610458 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610487 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610514 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610551 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610595 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: 
\"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610643 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret\") pod \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\" (UID: \"910b9022-54fc-4f7d-b69b-bdb7661cb91d\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.610668 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info\") pod \"a97b1b29-2461-47c7-a3f9-71837fe03413\" (UID: \"a97b1b29-2461-47c7-a3f9-71837fe03413\") " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.616605 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info" (OuterVolumeSpecName: "pod-info") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.618756 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d" (OuterVolumeSpecName: "kube-api-access-thc2d") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "kube-api-access-thc2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.619418 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.620358 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.620885 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r" (OuterVolumeSpecName: "kube-api-access-7fg9r") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "kube-api-access-7fg9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.621051 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.622855 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.623616 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.629571 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.634507 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.636331 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.643635 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.643668 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.643751 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). 
InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.648576 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info" (OuterVolumeSpecName: "pod-info") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.653071 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.692337 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data" (OuterVolumeSpecName: "config-data") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713177 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713226 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713236 4903 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/910b9022-54fc-4f7d-b69b-bdb7661cb91d-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713256 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713265 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713299 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713307 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713316 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 
22:47:15.713326 4903 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a97b1b29-2461-47c7-a3f9-71837fe03413-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713335 4903 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713345 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713374 4903 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/910b9022-54fc-4f7d-b69b-bdb7661cb91d-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713382 4903 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a97b1b29-2461-47c7-a3f9-71837fe03413-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713391 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fg9r\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-kube-api-access-7fg9r\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713399 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thc2d\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-kube-api-access-thc2d\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713407 4903 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.713416 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.724302 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data" (OuterVolumeSpecName: "config-data") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.747351 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.748341 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf" (OuterVolumeSpecName: "server-conf") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.770463 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf" (OuterVolumeSpecName: "server-conf") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.772986 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.815463 4903 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.815649 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a97b1b29-2461-47c7-a3f9-71837fe03413-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.815740 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.815798 4903 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/910b9022-54fc-4f7d-b69b-bdb7661cb91d-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.815864 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.827715 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.828520 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "910b9022-54fc-4f7d-b69b-bdb7661cb91d" (UID: "910b9022-54fc-4f7d-b69b-bdb7661cb91d"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.847826 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "a97b1b29-2461-47c7-a3f9-71837fe03413" (UID: "a97b1b29-2461-47c7-a3f9-71837fe03413"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.918041 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/910b9022-54fc-4f7d-b69b-bdb7661cb91d-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:15 crc kubenswrapper[4903]: I1126 22:47:15.918075 4903 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a97b1b29-2461-47c7-a3f9-71837fe03413-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.066760 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"910b9022-54fc-4f7d-b69b-bdb7661cb91d","Type":"ContainerDied","Data":"20a245738a3ce9bf04c5af13564381e9e68c5840419c912276ff90749b0754f4"} Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.066783 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.067475 4903 scope.go:117] "RemoveContainer" containerID="0584530df49ff20fab14cd674291135a2d591f6fd7ce117d13a3ca007cce8f5d" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.071476 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a97b1b29-2461-47c7-a3f9-71837fe03413","Type":"ContainerDied","Data":"affb2988258f7d0b7bc50347170cbab5ade5832c1bbc5cc8a914c76e6f8415d0"} Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.071568 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.074212 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" event={"ID":"df3949c0-f412-4202-a768-e567ca5e2ea2","Type":"ContainerStarted","Data":"c37412d3b0bc8b51a5009dd9d5cf689e5df75243c5ed080e0fe5ecae8c8a8a4e"} Nov 26 22:47:16 crc kubenswrapper[4903]: E1126 22:47:16.075589 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-wsb64" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.115074 4903 scope.go:117] "RemoveContainer" containerID="4dd0882b808a2194123234623a000bbdf57499bf825ea99e6092344a44365cc1" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.134157 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.164587 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.229612 4903 scope.go:117] "RemoveContainer" containerID="2d3df56702b17befc1893f007a5b55c5842827f405f47c1858ec09e3ca3f571a" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.250375 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.266234 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.273926 4903 scope.go:117] "RemoveContainer" 
containerID="04e34959aebfbba31367cd19f0fe0bba4187ce0a734a6ea30825e6142070d487" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.279799 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: E1126 22:47:16.280283 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280304 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: E1126 22:47:16.280326 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280333 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: E1126 22:47:16.280355 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="setup-container" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280361 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="setup-container" Nov 26 22:47:16 crc kubenswrapper[4903]: E1126 22:47:16.280373 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="setup-container" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280379 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="setup-container" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280592 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.280625 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" containerName="rabbitmq" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.282773 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.284363 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qhzgz" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.286289 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.286601 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.287572 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.288432 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.288665 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.288860 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.296680 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.299803 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.303049 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.305729 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.305990 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.306245 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.306973 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.307256 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-xmp9c" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.308953 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.317187 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.336743 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.432593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.432650 4903 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.432678 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.432734 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.432924 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433022 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5c06e745-2d71-48e5-9cf2-e361471b9b74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433079 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433122 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-config-data\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433191 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433427 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433473 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433510 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433531 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5c06e745-2d71-48e5-9cf2-e361471b9b74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433546 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2fgt\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-kube-api-access-v2fgt\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433645 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f32ba682-7919-4290-adff-40b16ea07fed-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433778 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433841 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk4n8\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-kube-api-access-dk4n8\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433895 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.433955 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.434022 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.434057 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f32ba682-7919-4290-adff-40b16ea07fed-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.434095 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536247 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536305 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536332 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536359 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536379 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5c06e745-2d71-48e5-9cf2-e361471b9b74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536394 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2fgt\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-kube-api-access-v2fgt\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536419 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f32ba682-7919-4290-adff-40b16ea07fed-pod-info\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536448 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536472 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk4n8\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-kube-api-access-dk4n8\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536490 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536516 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536544 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536560 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f32ba682-7919-4290-adff-40b16ea07fed-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536579 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536597 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536613 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 
22:47:16.536633 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536665 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536715 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536743 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5c06e745-2d71-48e5-9cf2-e361471b9b74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536765 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.536783 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-config-data\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.537583 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-config-data\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.537842 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.539667 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542429 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") device mount path 
\"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542492 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542626 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542891 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542899 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.542996 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.543055 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.544051 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5c06e745-2d71-48e5-9cf2-e361471b9b74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.544543 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f32ba682-7919-4290-adff-40b16ea07fed-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.552526 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.555367 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/f32ba682-7919-4290-adff-40b16ea07fed-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.555523 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f32ba682-7919-4290-adff-40b16ea07fed-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.556177 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.556228 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5c06e745-2d71-48e5-9cf2-e361471b9b74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.556670 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.576320 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.576553 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5c06e745-2d71-48e5-9cf2-e361471b9b74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.593415 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk4n8\" (UniqueName: \"kubernetes.io/projected/f32ba682-7919-4290-adff-40b16ea07fed-kube-api-access-dk4n8\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.600192 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2fgt\" (UniqueName: \"kubernetes.io/projected/5c06e745-2d71-48e5-9cf2-e361471b9b74-kube-api-access-v2fgt\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.649992 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f32ba682-7919-4290-adff-40b16ea07fed\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.659364 
4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"5c06e745-2d71-48e5-9cf2-e361471b9b74\") " pod="openstack/rabbitmq-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.889016 4903 scope.go:117] "RemoveContainer" containerID="1d049408fa0a73a548c742ac563b8c3a2115d0b9a16ccce00190d9939da1947f" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.923041 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:47:16 crc kubenswrapper[4903]: I1126 22:47:16.940284 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 22:47:17 crc kubenswrapper[4903]: I1126 22:47:17.095900 4903 generic.go:334] "Generic (PLEG): container finished" podID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerID="bfa54c17926c9d121cd37ecce1d070ae6eace4e2522d45fdb836ef1c63f5108d" exitCode=0 Nov 26 22:47:17 crc kubenswrapper[4903]: I1126 22:47:17.095961 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" event={"ID":"df3949c0-f412-4202-a768-e567ca5e2ea2","Type":"ContainerDied","Data":"bfa54c17926c9d121cd37ecce1d070ae6eace4e2522d45fdb836ef1c63f5108d"} Nov 26 22:47:17 crc kubenswrapper[4903]: W1126 22:47:17.470104 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf32ba682_7919_4290_adff_40b16ea07fed.slice/crio-dfe71f9900d7308ae9a0269290fe1bbb93a5fedc17e0cce72d17bf6537ace555 WatchSource:0}: Error finding container dfe71f9900d7308ae9a0269290fe1bbb93a5fedc17e0cce72d17bf6537ace555: Status 404 returned error can't find the container with id dfe71f9900d7308ae9a0269290fe1bbb93a5fedc17e0cce72d17bf6537ace555 Nov 26 22:47:17 crc kubenswrapper[4903]: I1126 22:47:17.480801 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 22:47:17 crc kubenswrapper[4903]: W1126 22:47:17.488251 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c06e745_2d71_48e5_9cf2_e361471b9b74.slice/crio-07f63b856ef014d16618240436eb3a1c6653d2f11863e1180ef689934751d748 WatchSource:0}: Error finding container 07f63b856ef014d16618240436eb3a1c6653d2f11863e1180ef689934751d748: Status 404 returned error can't find the container with id 07f63b856ef014d16618240436eb3a1c6653d2f11863e1180ef689934751d748 Nov 26 22:47:17 crc kubenswrapper[4903]: I1126 22:47:17.492763 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.045530 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="910b9022-54fc-4f7d-b69b-bdb7661cb91d" path="/var/lib/kubelet/pods/910b9022-54fc-4f7d-b69b-bdb7661cb91d/volumes" Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.048665 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a97b1b29-2461-47c7-a3f9-71837fe03413" path="/var/lib/kubelet/pods/a97b1b29-2461-47c7-a3f9-71837fe03413/volumes" Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.124404 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" 
event={"ID":"df3949c0-f412-4202-a768-e567ca5e2ea2","Type":"ContainerStarted","Data":"dc95b161ccd03f809a192035c9e5aad5ea04e6e47adb24e72d59886d9b58fe23"} Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.127205 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5c06e745-2d71-48e5-9cf2-e361471b9b74","Type":"ContainerStarted","Data":"07f63b856ef014d16618240436eb3a1c6653d2f11863e1180ef689934751d748"} Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.129183 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f32ba682-7919-4290-adff-40b16ea07fed","Type":"ContainerStarted","Data":"dfe71f9900d7308ae9a0269290fe1bbb93a5fedc17e0cce72d17bf6537ace555"} Nov 26 22:47:18 crc kubenswrapper[4903]: I1126 22:47:18.155348 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" podStartSLOduration=8.155334516 podStartE2EDuration="8.155334516s" podCreationTimestamp="2025-11-26 22:47:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:47:18.152456129 +0000 UTC m=+1566.842691039" watchObservedRunningTime="2025-11-26 22:47:18.155334516 +0000 UTC m=+1566.845569426" Nov 26 22:47:19 crc kubenswrapper[4903]: I1126 22:47:19.139664 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:20 crc kubenswrapper[4903]: I1126 22:47:20.160214 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f32ba682-7919-4290-adff-40b16ea07fed","Type":"ContainerStarted","Data":"936946124e3bc647506c397c430c3fbd4e1989ba82887176defc159462b6d068"} Nov 26 22:47:21 crc kubenswrapper[4903]: I1126 22:47:21.180767 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5c06e745-2d71-48e5-9cf2-e361471b9b74","Type":"ContainerStarted","Data":"19af02228d2db1e30009d1a17e9a0c893fd9d7e1efe8d9fdf3b218e8c76ee7bc"} Nov 26 22:47:22 crc kubenswrapper[4903]: I1126 22:47:22.009139 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.1.7:3000/\": dial tcp 10.217.1.7:3000: connect: connection refused" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.396883 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.485527 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.485782 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="dnsmasq-dns" containerID="cri-o://bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f" gracePeriod=10 Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.656013 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-4fkw5"] Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.658002 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.684491 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-4fkw5"] Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709007 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-config\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709090 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709156 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709253 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709331 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709776 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.709905 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdbgc\" (UniqueName: \"kubernetes.io/projected/9af2401e-79f8-4a02-be46-995607766071-kube-api-access-hdbgc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811527 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdbgc\" (UniqueName: \"kubernetes.io/projected/9af2401e-79f8-4a02-be46-995607766071-kube-api-access-hdbgc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 
22:47:26.811585 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-config\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811614 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811640 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811679 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811770 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.811906 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.812644 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.812647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-config\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.813005 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.813199 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.813573 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.813804 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9af2401e-79f8-4a02-be46-995607766071-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.831125 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdbgc\" (UniqueName: \"kubernetes.io/projected/9af2401e-79f8-4a02-be46-995607766071-kube-api-access-hdbgc\") pod \"dnsmasq-dns-6f6df4f56c-4fkw5\" (UID: \"9af2401e-79f8-4a02-be46-995607766071\") " pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:26 crc kubenswrapper[4903]: I1126 22:47:26.992749 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.202594 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.284772 4903 generic.go:334] "Generic (PLEG): container finished" podID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerID="bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f" exitCode=0 Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.284856 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" event={"ID":"ec06e192-e766-4bb7-9c9d-1d2dd8058270","Type":"ContainerDied","Data":"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f"} Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.284889 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.284908 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-9lccp" event={"ID":"ec06e192-e766-4bb7-9c9d-1d2dd8058270","Type":"ContainerDied","Data":"51e7343869de99bfbe2e7e76495b0a5465045c4a7c22af78901ae0dddcd90aa6"} Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.284929 4903 scope.go:117] "RemoveContainer" containerID="bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.314002 4903 scope.go:117] "RemoveContainer" containerID="841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323528 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323575 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323607 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt4vk\" (UniqueName: \"kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323722 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323856 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.323983 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0\") pod \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\" (UID: \"ec06e192-e766-4bb7-9c9d-1d2dd8058270\") " Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.330894 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk" (OuterVolumeSpecName: "kube-api-access-mt4vk") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "kube-api-access-mt4vk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.347955 4903 scope.go:117] "RemoveContainer" containerID="bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f" Nov 26 22:47:27 crc kubenswrapper[4903]: E1126 22:47:27.348503 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f\": container with ID starting with bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f not found: ID does not exist" containerID="bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.348546 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f"} err="failed to get container status \"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f\": rpc error: code = NotFound desc = could not find container \"bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f\": container with ID starting with bb11b45b8283f2e74de1aff09b45e7b83add9610b5bb0bb25101757b60e3988f not found: ID does not exist" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.348572 4903 scope.go:117] "RemoveContainer" containerID="841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb" Nov 26 22:47:27 crc kubenswrapper[4903]: E1126 22:47:27.348830 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb\": container with ID starting with 841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb not found: ID does not exist" containerID="841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.348856 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb"} err="failed to get container status \"841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb\": rpc error: code = NotFound desc = could not find container \"841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb\": container with ID starting with 841e0731bbba857acefe2e1ae0cdf5f03a08a1905f60e9706af5bff18c00d0eb not found: ID does not exist" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.399634 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.401141 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.412838 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.427065 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.427096 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt4vk\" (UniqueName: \"kubernetes.io/projected/ec06e192-e766-4bb7-9c9d-1d2dd8058270-kube-api-access-mt4vk\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.427108 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.427123 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.447028 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config" (OuterVolumeSpecName: "config") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.451445 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ec06e192-e766-4bb7-9c9d-1d2dd8058270" (UID: "ec06e192-e766-4bb7-9c9d-1d2dd8058270"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.514620 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-4fkw5"] Nov 26 22:47:27 crc kubenswrapper[4903]: W1126 22:47:27.515555 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9af2401e_79f8_4a02_be46_995607766071.slice/crio-06b3763919b6770eab5b61833618cf9fd5f4f99aae2db7621e1cae86a7fdfa60 WatchSource:0}: Error finding container 06b3763919b6770eab5b61833618cf9fd5f4f99aae2db7621e1cae86a7fdfa60: Status 404 returned error can't find the container with id 06b3763919b6770eab5b61833618cf9fd5f4f99aae2db7621e1cae86a7fdfa60 Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.529358 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.529388 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec06e192-e766-4bb7-9c9d-1d2dd8058270-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.623173 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:47:27 crc kubenswrapper[4903]: I1126 22:47:27.634106 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-9lccp"] Nov 26 22:47:28 crc kubenswrapper[4903]: I1126 22:47:28.041234 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" path="/var/lib/kubelet/pods/ec06e192-e766-4bb7-9c9d-1d2dd8058270/volumes" Nov 26 22:47:28 crc kubenswrapper[4903]: I1126 22:47:28.297818 4903 generic.go:334] "Generic (PLEG): container finished" podID="9af2401e-79f8-4a02-be46-995607766071" containerID="3ef9b7e236a44c2e2d6c5ecc22818acd35f964f08770e3cbdd4b382e3a2c68f3" exitCode=0 Nov 26 22:47:28 crc kubenswrapper[4903]: I1126 22:47:28.297892 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" event={"ID":"9af2401e-79f8-4a02-be46-995607766071","Type":"ContainerDied","Data":"3ef9b7e236a44c2e2d6c5ecc22818acd35f964f08770e3cbdd4b382e3a2c68f3"} Nov 26 22:47:28 crc kubenswrapper[4903]: I1126 22:47:28.297924 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" event={"ID":"9af2401e-79f8-4a02-be46-995607766071","Type":"ContainerStarted","Data":"06b3763919b6770eab5b61833618cf9fd5f4f99aae2db7621e1cae86a7fdfa60"} Nov 26 22:47:29 crc kubenswrapper[4903]: I1126 22:47:29.316526 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" event={"ID":"9af2401e-79f8-4a02-be46-995607766071","Type":"ContainerStarted","Data":"b905db256c09e9d7c0e7043a3d226d06e3fb2aceff3b2c9f085b824745c8ba64"} Nov 26 22:47:29 crc kubenswrapper[4903]: I1126 22:47:29.316878 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.375193 4903 generic.go:334] "Generic (PLEG): container finished" podID="f6b85164-43b1-4607-8690-797692f5c02f" containerID="bce5d81f816732ced5807ea71f52fb504a1d6a02c385cbe45b47b98f25627c63" exitCode=137 Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 
22:47:30.375298 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerDied","Data":"bce5d81f816732ced5807ea71f52fb504a1d6a02c385cbe45b47b98f25627c63"} Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.378772 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-wsb64" event={"ID":"4049fe04-7d20-41b8-b38c-9c0b39144fda","Type":"ContainerStarted","Data":"36775ec553d738035ad20c1f2e715106486713e54f3914df078813de829f61b9"} Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.398843 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-wsb64" podStartSLOduration=2.553332844 podStartE2EDuration="36.398829772s" podCreationTimestamp="2025-11-26 22:46:54 +0000 UTC" firstStartedPulling="2025-11-26 22:46:55.401106138 +0000 UTC m=+1544.091341038" lastFinishedPulling="2025-11-26 22:47:29.246603026 +0000 UTC m=+1577.936837966" observedRunningTime="2025-11-26 22:47:30.396669794 +0000 UTC m=+1579.086904714" watchObservedRunningTime="2025-11-26 22:47:30.398829772 +0000 UTC m=+1579.089064682" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.404411 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" podStartSLOduration=4.404404991 podStartE2EDuration="4.404404991s" podCreationTimestamp="2025-11-26 22:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:47:29.338719116 +0000 UTC m=+1578.028954036" watchObservedRunningTime="2025-11-26 22:47:30.404404991 +0000 UTC m=+1579.094639901" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.736346 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850373 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850528 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850732 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850796 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850831 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850855 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.850945 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgn24\" (UniqueName: \"kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.851082 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.851507 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd\") pod \"f6b85164-43b1-4607-8690-797692f5c02f\" (UID: \"f6b85164-43b1-4607-8690-797692f5c02f\") " Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.851772 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.852217 4903 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.852245 4903 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f6b85164-43b1-4607-8690-797692f5c02f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.858808 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts" (OuterVolumeSpecName: "scripts") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.865133 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24" (OuterVolumeSpecName: "kube-api-access-jgn24") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "kube-api-access-jgn24". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.890517 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.955653 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.964636 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgn24\" (UniqueName: \"kubernetes.io/projected/f6b85164-43b1-4607-8690-797692f5c02f-kube-api-access-jgn24\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.961028 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.963334 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.964920 4903 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:30 crc kubenswrapper[4903]: I1126 22:47:30.993763 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data" (OuterVolumeSpecName: "config-data") pod "f6b85164-43b1-4607-8690-797692f5c02f" (UID: "f6b85164-43b1-4607-8690-797692f5c02f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.067004 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.067187 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.067298 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6b85164-43b1-4607-8690-797692f5c02f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.408536 4903 generic.go:334] "Generic (PLEG): container finished" podID="4049fe04-7d20-41b8-b38c-9c0b39144fda" containerID="36775ec553d738035ad20c1f2e715106486713e54f3914df078813de829f61b9" exitCode=0 Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.408653 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-wsb64" event={"ID":"4049fe04-7d20-41b8-b38c-9c0b39144fda","Type":"ContainerDied","Data":"36775ec553d738035ad20c1f2e715106486713e54f3914df078813de829f61b9"} Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.416417 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f6b85164-43b1-4607-8690-797692f5c02f","Type":"ContainerDied","Data":"d8b5c146b08152e7175e1c505c183f7ca5d4ba47182921afb2f46096908e18c0"} Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.416463 4903 scope.go:117] "RemoveContainer" containerID="2410bc318ada5bdfcc585dc9360eea0422c59faa0b56e639ed650748e2ff2daf" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.416596 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.491912 4903 scope.go:117] "RemoveContainer" containerID="50dcabb3407d3bcee106bb0eed6605e23e61267ade39fe13d76f29c6f6dac686" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.496244 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.507090 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.518267 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.518943 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="init" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519009 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="init" Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.519083 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-notification-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519133 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-notification-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.519187 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="dnsmasq-dns" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519255 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="dnsmasq-dns" Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.519348 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="proxy-httpd" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519407 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="proxy-httpd" Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.519481 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-central-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519548 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-central-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: E1126 22:47:31.519640 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="sg-core" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.519803 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="sg-core" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.520708 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="proxy-httpd" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.520817 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec06e192-e766-4bb7-9c9d-1d2dd8058270" containerName="dnsmasq-dns" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.520883 4903 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-central-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.520950 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="sg-core" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.521018 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b85164-43b1-4607-8690-797692f5c02f" containerName="ceilometer-notification-agent" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.523470 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.534215 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.534351 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.549122 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.555423 4903 scope.go:117] "RemoveContainer" containerID="0025e67dd90023052b5b9e43f33b81a74bf93853296b018ba93a29098f850793" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.565537 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.622999 4903 scope.go:117] "RemoveContainer" containerID="bce5d81f816732ced5807ea71f52fb504a1d6a02c385cbe45b47b98f25627c63" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.688373 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-log-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.688776 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-config-data\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.688902 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-run-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.689023 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.689248 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-scripts\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc 
kubenswrapper[4903]: I1126 22:47:31.689458 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.689611 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bwgj\" (UniqueName: \"kubernetes.io/projected/8c528520-edac-42d3-a81c-f5aca4d05266-kube-api-access-2bwgj\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.689801 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.791874 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bwgj\" (UniqueName: \"kubernetes.io/projected/8c528520-edac-42d3-a81c-f5aca4d05266-kube-api-access-2bwgj\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792279 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792360 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-log-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792397 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-config-data\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792413 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-run-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792435 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-combined-ca-bundle\") pod 
\"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.792507 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-scripts\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.793008 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-log-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.793193 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c528520-edac-42d3-a81c-f5aca4d05266-run-httpd\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.797556 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.799151 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-config-data\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.800493 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.801848 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.809173 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c528520-edac-42d3-a81c-f5aca4d05266-scripts\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.809465 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bwgj\" (UniqueName: \"kubernetes.io/projected/8c528520-edac-42d3-a81c-f5aca4d05266-kube-api-access-2bwgj\") pod \"ceilometer-0\" (UID: \"8c528520-edac-42d3-a81c-f5aca4d05266\") " pod="openstack/ceilometer-0" Nov 26 22:47:31 crc kubenswrapper[4903]: I1126 22:47:31.888078 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 22:47:32 crc kubenswrapper[4903]: I1126 22:47:32.053873 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6b85164-43b1-4607-8690-797692f5c02f" path="/var/lib/kubelet/pods/f6b85164-43b1-4607-8690-797692f5c02f/volumes" Nov 26 22:47:32 crc kubenswrapper[4903]: I1126 22:47:32.481330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 22:47:32 crc kubenswrapper[4903]: W1126 22:47:32.484766 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c528520_edac_42d3_a81c_f5aca4d05266.slice/crio-607f08447f96eaaae520f7fb7c8cf223a73274791f88ea1f83ac20331fd907b9 WatchSource:0}: Error finding container 607f08447f96eaaae520f7fb7c8cf223a73274791f88ea1f83ac20331fd907b9: Status 404 returned error can't find the container with id 607f08447f96eaaae520f7fb7c8cf223a73274791f88ea1f83ac20331fd907b9 Nov 26 22:47:32 crc kubenswrapper[4903]: I1126 22:47:32.910242 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-wsb64" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.022071 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle\") pod \"4049fe04-7d20-41b8-b38c-9c0b39144fda\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.022224 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data\") pod \"4049fe04-7d20-41b8-b38c-9c0b39144fda\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.022246 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fh2gw\" (UniqueName: \"kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw\") pod \"4049fe04-7d20-41b8-b38c-9c0b39144fda\" (UID: \"4049fe04-7d20-41b8-b38c-9c0b39144fda\") " Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.027704 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw" (OuterVolumeSpecName: "kube-api-access-fh2gw") pod "4049fe04-7d20-41b8-b38c-9c0b39144fda" (UID: "4049fe04-7d20-41b8-b38c-9c0b39144fda"). InnerVolumeSpecName "kube-api-access-fh2gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.071328 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4049fe04-7d20-41b8-b38c-9c0b39144fda" (UID: "4049fe04-7d20-41b8-b38c-9c0b39144fda"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.127249 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.127283 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fh2gw\" (UniqueName: \"kubernetes.io/projected/4049fe04-7d20-41b8-b38c-9c0b39144fda-kube-api-access-fh2gw\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.130867 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data" (OuterVolumeSpecName: "config-data") pod "4049fe04-7d20-41b8-b38c-9c0b39144fda" (UID: "4049fe04-7d20-41b8-b38c-9c0b39144fda"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.229058 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4049fe04-7d20-41b8-b38c-9c0b39144fda-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.445263 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c528520-edac-42d3-a81c-f5aca4d05266","Type":"ContainerStarted","Data":"607f08447f96eaaae520f7fb7c8cf223a73274791f88ea1f83ac20331fd907b9"} Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.454483 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-wsb64" event={"ID":"4049fe04-7d20-41b8-b38c-9c0b39144fda","Type":"ContainerDied","Data":"7fd5192cd82be51f1745d5b1e29cd0bb44101af05715b4affc9d0efce85970f6"} Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.454529 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fd5192cd82be51f1745d5b1e29cd0bb44101af05715b4affc9d0efce85970f6" Nov 26 22:47:33 crc kubenswrapper[4903]: I1126 22:47:33.454530 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-wsb64" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.451982 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-66bc977bcf-w4wg7"] Nov 26 22:47:34 crc kubenswrapper[4903]: E1126 22:47:34.452671 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" containerName="heat-db-sync" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.452683 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" containerName="heat-db-sync" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.452977 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" containerName="heat-db-sync" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.453774 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.475168 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-66bc977bcf-w4wg7"] Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.507565 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-867c798764-xfxnw"] Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.509320 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.549528 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-867c798764-xfxnw"] Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.567039 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-766bc64666-vhfgd"] Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.568617 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.578984 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwxlp\" (UniqueName: \"kubernetes.io/projected/cc6fa80e-1db0-4944-9c07-04df732f4914-kube-api-access-pwxlp\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.579095 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-combined-ca-bundle\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.579122 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data-custom\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.579246 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.616897 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-766bc64666-vhfgd"] Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684162 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-combined-ca-bundle\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684215 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data-custom\") pod 
\"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684250 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-internal-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684282 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-combined-ca-bundle\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684318 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-public-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684353 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data-custom\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684466 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-public-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684509 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lck7l\" (UniqueName: \"kubernetes.io/projected/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-kube-api-access-lck7l\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684544 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l6hh\" (UniqueName: \"kubernetes.io/projected/3abe2357-63af-453d-9e93-3d087275e569-kube-api-access-7l6hh\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684569 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684622 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684647 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-internal-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684716 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684749 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwxlp\" (UniqueName: \"kubernetes.io/projected/cc6fa80e-1db0-4944-9c07-04df732f4914-kube-api-access-pwxlp\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684805 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data-custom\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.684867 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-combined-ca-bundle\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.697898 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.700998 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-config-data-custom\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.702777 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc6fa80e-1db0-4944-9c07-04df732f4914-combined-ca-bundle\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.703372 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pwxlp\" (UniqueName: \"kubernetes.io/projected/cc6fa80e-1db0-4944-9c07-04df732f4914-kube-api-access-pwxlp\") pod \"heat-engine-66bc977bcf-w4wg7\" (UID: \"cc6fa80e-1db0-4944-9c07-04df732f4914\") " pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.787060 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.787845 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data-custom\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788053 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-combined-ca-bundle\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788158 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-internal-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788229 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-combined-ca-bundle\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788303 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-public-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788386 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data-custom\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788588 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-public-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788671 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lck7l\" (UniqueName: 
\"kubernetes.io/projected/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-kube-api-access-lck7l\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788767 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l6hh\" (UniqueName: \"kubernetes.io/projected/3abe2357-63af-453d-9e93-3d087275e569-kube-api-access-7l6hh\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788833 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.788920 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-internal-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.792658 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.794090 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-internal-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.798588 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data-custom\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.799422 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-combined-ca-bundle\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.800030 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.803540 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-public-tls-certs\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.805666 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-config-data-custom\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.810080 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3abe2357-63af-453d-9e93-3d087275e569-combined-ca-bundle\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.810157 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-internal-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.810146 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-config-data\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.814417 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-public-tls-certs\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.836330 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lck7l\" (UniqueName: \"kubernetes.io/projected/e0f6d1e3-0e99-495c-a8da-005cc8d05e25-kube-api-access-lck7l\") pod \"heat-api-867c798764-xfxnw\" (UID: \"e0f6d1e3-0e99-495c-a8da-005cc8d05e25\") " pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.836862 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.847525 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l6hh\" (UniqueName: \"kubernetes.io/projected/3abe2357-63af-453d-9e93-3d087275e569-kube-api-access-7l6hh\") pod \"heat-cfnapi-766bc64666-vhfgd\" (UID: \"3abe2357-63af-453d-9e93-3d087275e569\") " pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:34 crc kubenswrapper[4903]: I1126 22:47:34.902401 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:36 crc kubenswrapper[4903]: W1126 22:47:36.934207 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0f6d1e3_0e99_495c_a8da_005cc8d05e25.slice/crio-ef5ca19566b1c0f5226dcfe690a8b654a2b732d45278bbff2194d4e40f8bb11d WatchSource:0}: Error finding container ef5ca19566b1c0f5226dcfe690a8b654a2b732d45278bbff2194d4e40f8bb11d: Status 404 returned error can't find the container with id ef5ca19566b1c0f5226dcfe690a8b654a2b732d45278bbff2194d4e40f8bb11d Nov 26 22:47:36 crc kubenswrapper[4903]: I1126 22:47:36.937944 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-867c798764-xfxnw"] Nov 26 22:47:36 crc kubenswrapper[4903]: I1126 22:47:36.954480 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-66bc977bcf-w4wg7"] Nov 26 22:47:36 crc kubenswrapper[4903]: I1126 22:47:36.994918 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f6df4f56c-4fkw5" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.064558 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.064826 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="dnsmasq-dns" containerID="cri-o://dc95b161ccd03f809a192035c9e5aad5ea04e6e47adb24e72d59886d9b58fe23" gracePeriod=10 Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.132024 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-766bc64666-vhfgd"] Nov 26 22:47:37 crc kubenswrapper[4903]: W1126 22:47:37.145477 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3abe2357_63af_453d_9e93_3d087275e569.slice/crio-03789f00f02ee3d76a049713f48704cb19c2da388fe46207a01900860ce10190 WatchSource:0}: Error finding container 03789f00f02ee3d76a049713f48704cb19c2da388fe46207a01900860ce10190: Status 404 returned error can't find the container with id 03789f00f02ee3d76a049713f48704cb19c2da388fe46207a01900860ce10190 Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.607854 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c528520-edac-42d3-a81c-f5aca4d05266","Type":"ContainerStarted","Data":"1ff92f1f4fffef0982df5dd752836cf699df017fcdfd768b8a5f1ac20981c3a9"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.608177 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c528520-edac-42d3-a81c-f5aca4d05266","Type":"ContainerStarted","Data":"ac0abc28008225ee404252ba996c671d3cc3a1631b41556ff67cc60c12b618e4"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.639916 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-867c798764-xfxnw" event={"ID":"e0f6d1e3-0e99-495c-a8da-005cc8d05e25","Type":"ContainerStarted","Data":"ef5ca19566b1c0f5226dcfe690a8b654a2b732d45278bbff2194d4e40f8bb11d"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.659876 4903 generic.go:334] "Generic (PLEG): container finished" podID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerID="dc95b161ccd03f809a192035c9e5aad5ea04e6e47adb24e72d59886d9b58fe23" exitCode=0 Nov 26 22:47:37 crc kubenswrapper[4903]: 
I1126 22:47:37.659938 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" event={"ID":"df3949c0-f412-4202-a768-e567ca5e2ea2","Type":"ContainerDied","Data":"dc95b161ccd03f809a192035c9e5aad5ea04e6e47adb24e72d59886d9b58fe23"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.672523 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-66bc977bcf-w4wg7" event={"ID":"cc6fa80e-1db0-4944-9c07-04df732f4914","Type":"ContainerStarted","Data":"ecab702bd489469f38da843fe9c234c9fa356a7354e8feeb20ccf6534c27155c"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.672787 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-66bc977bcf-w4wg7" event={"ID":"cc6fa80e-1db0-4944-9c07-04df732f4914","Type":"ContainerStarted","Data":"823cc12f376477983b70c6f4d543e99d29d9ef301cc23f7cf525797a57c60c13"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.673653 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-66bc977bcf-w4wg7" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.675817 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-766bc64666-vhfgd" event={"ID":"3abe2357-63af-453d-9e93-3d087275e569","Type":"ContainerStarted","Data":"03789f00f02ee3d76a049713f48704cb19c2da388fe46207a01900860ce10190"} Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.694002 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-66bc977bcf-w4wg7" podStartSLOduration=3.6939874809999997 podStartE2EDuration="3.693987481s" podCreationTimestamp="2025-11-26 22:47:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:47:37.69243769 +0000 UTC m=+1586.382672590" watchObservedRunningTime="2025-11-26 22:47:37.693987481 +0000 UTC m=+1586.384222391" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.749369 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867649 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867780 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867835 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867891 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867920 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.867971 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bp79p\" (UniqueName: \"kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.868163 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0\") pod \"df3949c0-f412-4202-a768-e567ca5e2ea2\" (UID: \"df3949c0-f412-4202-a768-e567ca5e2ea2\") " Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.898777 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p" (OuterVolumeSpecName: "kube-api-access-bp79p") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "kube-api-access-bp79p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.940367 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.945662 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.947488 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.976097 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bp79p\" (UniqueName: \"kubernetes.io/projected/df3949c0-f412-4202-a768-e567ca5e2ea2-kube-api-access-bp79p\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.976131 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.976142 4903 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.976152 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:37 crc kubenswrapper[4903]: I1126 22:47:37.976892 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config" (OuterVolumeSpecName: "config") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.010210 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.021089 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "df3949c0-f412-4202-a768-e567ca5e2ea2" (UID: "df3949c0-f412-4202-a768-e567ca5e2ea2"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.078779 4903 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-config\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.078807 4903 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.078818 4903 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df3949c0-f412-4202-a768-e567ca5e2ea2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.688856 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" event={"ID":"df3949c0-f412-4202-a768-e567ca5e2ea2","Type":"ContainerDied","Data":"c37412d3b0bc8b51a5009dd9d5cf689e5df75243c5ed080e0fe5ecae8c8a8a4e"} Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.689148 4903 scope.go:117] "RemoveContainer" containerID="dc95b161ccd03f809a192035c9e5aad5ea04e6e47adb24e72d59886d9b58fe23" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.688934 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-m9mfn" Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.729204 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.745678 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-m9mfn"] Nov 26 22:47:38 crc kubenswrapper[4903]: I1126 22:47:38.776504 4903 scope.go:117] "RemoveContainer" containerID="bfa54c17926c9d121cd37ecce1d070ae6eace4e2522d45fdb836ef1c63f5108d" Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.067732 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" path="/var/lib/kubelet/pods/df3949c0-f412-4202-a768-e567ca5e2ea2/volumes" Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.716540 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-867c798764-xfxnw" event={"ID":"e0f6d1e3-0e99-495c-a8da-005cc8d05e25","Type":"ContainerStarted","Data":"da00f87420ac7c7a2df148878afd83874be3bc1e53e3aefed67188b929c2d893"} Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.717327 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-867c798764-xfxnw" Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.721254 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-766bc64666-vhfgd" event={"ID":"3abe2357-63af-453d-9e93-3d087275e569","Type":"ContainerStarted","Data":"3c0f2c755bb23ce8fe80d1d7f97bfdcb09c5cd3aebe0ea9dffe518a72fbe01f4"} Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.721469 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-766bc64666-vhfgd" Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.725052 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8c528520-edac-42d3-a81c-f5aca4d05266","Type":"ContainerStarted","Data":"06c9546838f57de27049e5afb438eac233485c9fc3ad667a5689db20f10af834"}
Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.757829 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-867c798764-xfxnw" podStartSLOduration=4.465735187 podStartE2EDuration="6.757807678s" podCreationTimestamp="2025-11-26 22:47:34 +0000 UTC" firstStartedPulling="2025-11-26 22:47:36.944087557 +0000 UTC m=+1585.634322467" lastFinishedPulling="2025-11-26 22:47:39.236160028 +0000 UTC m=+1587.926394958" observedRunningTime="2025-11-26 22:47:40.753593866 +0000 UTC m=+1589.443828776" watchObservedRunningTime="2025-11-26 22:47:40.757807678 +0000 UTC m=+1589.448042578"
Nov 26 22:47:40 crc kubenswrapper[4903]: I1126 22:47:40.774839 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-766bc64666-vhfgd" podStartSLOduration=4.691885846 podStartE2EDuration="6.774822643s" podCreationTimestamp="2025-11-26 22:47:34 +0000 UTC" firstStartedPulling="2025-11-26 22:47:37.148411553 +0000 UTC m=+1585.838646463" lastFinishedPulling="2025-11-26 22:47:39.23134835 +0000 UTC m=+1587.921583260" observedRunningTime="2025-11-26 22:47:40.771671439 +0000 UTC m=+1589.461906349" watchObservedRunningTime="2025-11-26 22:47:40.774822643 +0000 UTC m=+1589.465057553"
Nov 26 22:47:41 crc kubenswrapper[4903]: I1126 22:47:41.743044 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c528520-edac-42d3-a81c-f5aca4d05266","Type":"ContainerStarted","Data":"5d3bf4678993f9ccf521fb5e42a0e6b846c3b660406fa83ec299c092622c968d"}
Nov 26 22:47:41 crc kubenswrapper[4903]: I1126 22:47:41.743647 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 22:47:41 crc kubenswrapper[4903]: I1126 22:47:41.770944 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.996689018 podStartE2EDuration="10.77092213s" podCreationTimestamp="2025-11-26 22:47:31 +0000 UTC" firstStartedPulling="2025-11-26 22:47:32.498142996 +0000 UTC m=+1581.188377896" lastFinishedPulling="2025-11-26 22:47:41.272376098 +0000 UTC m=+1589.962611008" observedRunningTime="2025-11-26 22:47:41.762836994 +0000 UTC m=+1590.453071954" watchObservedRunningTime="2025-11-26 22:47:41.77092213 +0000 UTC m=+1590.461157040"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.599605 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"]
Nov 26 22:47:46 crc kubenswrapper[4903]: E1126 22:47:46.600482 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="init"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.600494 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="init"
Nov 26 22:47:46 crc kubenswrapper[4903]: E1126 22:47:46.600532 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="dnsmasq-dns"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.600539 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="dnsmasq-dns"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.600810 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="df3949c0-f412-4202-a768-e567ca5e2ea2" containerName="dnsmasq-dns"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.601634 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.606147 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.606355 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.606458 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.606565 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.619379 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"]
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.720102 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.720204 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvvpf\" (UniqueName: \"kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.720364 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.720424 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.738072 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-766bc64666-vhfgd"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.739333 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-867c798764-xfxnw"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.822676 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.822775 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.822931 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.822967 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvvpf\" (UniqueName: \"kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.836549 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.840224 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"]
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.840419 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerName="heat-cfnapi" containerID="cri-o://3b8719ae15f3ee09a83474358e9125d061ea8ead25fecc0051331bfdb2122077" gracePeriod=60
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.842417 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.843999 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.847803 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvvpf\" (UniqueName: \"kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.854936 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"]
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.855124 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-67fbd86d69-b26tj" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerName="heat-api" containerID="cri-o://4dc2b6467314c282d106d8006f59bdb9a904b3c666d77a0350bad9cc7de820c1" gracePeriod=60
Nov 26 22:47:46 crc kubenswrapper[4903]: I1126 22:47:46.935009 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:47:47 crc kubenswrapper[4903]: I1126 22:47:47.846943 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"]
Nov 26 22:47:48 crc kubenswrapper[4903]: I1126 22:47:48.813861 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2" event={"ID":"154ea937-525e-406f-bef0-ffd2c360d7e1","Type":"ContainerStarted","Data":"900ac3461f1988931644f70f979371cade6b5a6f7954e8968c0a7f69902c5ecf"}
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.294064 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-67fbd86d69-b26tj" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.221:8004/healthcheck\": read tcp 10.217.0.2:52310->10.217.0.221:8004: read: connection reset by peer"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.301088 4903 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.222:8000/healthcheck\": read tcp 10.217.0.2:46876->10.217.0.222:8000: read: connection reset by peer"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.837201 4903 generic.go:334] "Generic (PLEG): container finished" podID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerID="3b8719ae15f3ee09a83474358e9125d061ea8ead25fecc0051331bfdb2122077" exitCode=0
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.837450 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" event={"ID":"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae","Type":"ContainerDied","Data":"3b8719ae15f3ee09a83474358e9125d061ea8ead25fecc0051331bfdb2122077"}
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.837476 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh" event={"ID":"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae","Type":"ContainerDied","Data":"499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152"}
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.837486 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="499cb623a12d695d2ada381d47e43a6cdc2086605a9818b62b6a1f89dc660152"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.838757 4903 generic.go:334] "Generic (PLEG): container finished" podID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerID="4dc2b6467314c282d106d8006f59bdb9a904b3c666d77a0350bad9cc7de820c1" exitCode=0
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.838777 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-67fbd86d69-b26tj" event={"ID":"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a","Type":"ContainerDied","Data":"4dc2b6467314c282d106d8006f59bdb9a904b3c666d77a0350bad9cc7de820c1"}
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.838793 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-67fbd86d69-b26tj" event={"ID":"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a","Type":"ContainerDied","Data":"17be9b44e761978010c65e501d3be4ad4ca59f7b3f85ed15a583a1d8a9347795"}
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.838802 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17be9b44e761978010c65e501d3be4ad4ca59f7b3f85ed15a583a1d8a9347795"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.862611 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.868561 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-67fbd86d69-b26tj"
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925455 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925527 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw5mb\" (UniqueName: \"kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925583 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925648 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925744 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925769 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925836 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925909 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.925976 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw7rr\" (UniqueName: \"kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.926004 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.926070 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle\") pod \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\" (UID: \"7fb6a2cb-3364-4848-8eb4-a0cbc97df32a\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.926094 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom\") pod \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\" (UID: \"4ee458b7-00d7-40e4-8c43-8c61e6fb87ae\") "
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.933083 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.933170 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.942545 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr" (OuterVolumeSpecName: "kube-api-access-gw7rr") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "kube-api-access-gw7rr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.957902 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb" (OuterVolumeSpecName: "kube-api-access-xw5mb") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "kube-api-access-xw5mb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.968755 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:50 crc kubenswrapper[4903]: I1126 22:47:50.994324 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.014866 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.016326 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.024291 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data" (OuterVolumeSpecName: "config-data") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.026404 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" (UID: "4ee458b7-00d7-40e4-8c43-8c61e6fb87ae"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029206 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data" (OuterVolumeSpecName: "config-data") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029335 4903 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029362 4903 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029376 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029388 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029401 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw7rr\" (UniqueName: \"kubernetes.io/projected/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-kube-api-access-gw7rr\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029416 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029429 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029441 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029450 4903 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029458 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw5mb\" (UniqueName: \"kubernetes.io/projected/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-kube-api-access-xw5mb\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.029467 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.045805 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" (UID: "7fb6a2cb-3364-4848-8eb4-a0cbc97df32a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.132310 4903 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.850742 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-67fbd86d69-b26tj"
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.850889 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6fff56d55f-8bbzh"
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.898228 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"]
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.910846 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-67fbd86d69-b26tj"]
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.921519 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"]
Nov 26 22:47:51 crc kubenswrapper[4903]: I1126 22:47:51.933220 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-6fff56d55f-8bbzh"]
Nov 26 22:47:52 crc kubenswrapper[4903]: I1126 22:47:52.046562 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" path="/var/lib/kubelet/pods/4ee458b7-00d7-40e4-8c43-8c61e6fb87ae/volumes"
Nov 26 22:47:52 crc kubenswrapper[4903]: I1126 22:47:52.047798 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" path="/var/lib/kubelet/pods/7fb6a2cb-3364-4848-8eb4-a0cbc97df32a/volumes"
Nov 26 22:47:53 crc kubenswrapper[4903]: I1126 22:47:53.882812 4903 generic.go:334] "Generic (PLEG): container finished" podID="f32ba682-7919-4290-adff-40b16ea07fed" containerID="936946124e3bc647506c397c430c3fbd4e1989ba82887176defc159462b6d068" exitCode=0
Nov 26 22:47:53 crc kubenswrapper[4903]: I1126 22:47:53.882898 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f32ba682-7919-4290-adff-40b16ea07fed","Type":"ContainerDied","Data":"936946124e3bc647506c397c430c3fbd4e1989ba82887176defc159462b6d068"}
Nov 26 22:47:53 crc kubenswrapper[4903]: I1126 22:47:53.885478 4903 generic.go:334] "Generic (PLEG): container finished" podID="5c06e745-2d71-48e5-9cf2-e361471b9b74" containerID="19af02228d2db1e30009d1a17e9a0c893fd9d7e1efe8d9fdf3b218e8c76ee7bc" exitCode=0
Nov 26 22:47:53 crc kubenswrapper[4903]: I1126 22:47:53.885509 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5c06e745-2d71-48e5-9cf2-e361471b9b74","Type":"ContainerDied","Data":"19af02228d2db1e30009d1a17e9a0c893fd9d7e1efe8d9fdf3b218e8c76ee7bc"}
Nov 26 22:47:54 crc kubenswrapper[4903]: I1126 22:47:54.847210 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-66bc977bcf-w4wg7"
Nov 26 22:47:54 crc kubenswrapper[4903]: I1126 22:47:54.906126 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"]
Nov 26 22:47:54 crc kubenswrapper[4903]: I1126 22:47:54.906322 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6c48d846b-6vkx8" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine" containerID="cri-o://99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" gracePeriod=60
Nov 26 22:47:58 crc kubenswrapper[4903]: E1126 22:47:58.747409 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:47:58 crc kubenswrapper[4903]: E1126 22:47:58.751955 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:47:58 crc kubenswrapper[4903]: E1126 22:47:58.754397 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:47:58 crc kubenswrapper[4903]: E1126 22:47:58.754452 4903 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6c48d846b-6vkx8" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine"
Nov 26 22:47:59 crc kubenswrapper[4903]: I1126 22:47:59.966141 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f32ba682-7919-4290-adff-40b16ea07fed","Type":"ContainerStarted","Data":"491f77c10aec9ce85e93551eddd107a557e77682892e626a9b66044c2051b869"}
Nov 26 22:47:59 crc kubenswrapper[4903]: I1126 22:47:59.966679 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 22:47:59 crc kubenswrapper[4903]: I1126 22:47:59.968762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"5c06e745-2d71-48e5-9cf2-e361471b9b74","Type":"ContainerStarted","Data":"dba932913bdb67f8af8dd3bed0ba5f96c4e7f94734bf899efd6db654a49d3907"}
Nov 26 22:47:59 crc kubenswrapper[4903]: I1126 22:47:59.969477 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 26 22:47:59 crc kubenswrapper[4903]: I1126 22:47:59.970720 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2" event={"ID":"154ea937-525e-406f-bef0-ffd2c360d7e1","Type":"ContainerStarted","Data":"8b2abdf0185bd16468859872e959b4d781d36c3a751a6218e4246adb31a197b1"}
Nov 26 22:48:00 crc kubenswrapper[4903]: I1126 22:48:00.009414 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=44.009394049 podStartE2EDuration="44.009394049s" podCreationTimestamp="2025-11-26 22:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:47:59.993909785 +0000 UTC m=+1608.684144695" watchObservedRunningTime="2025-11-26 22:48:00.009394049 +0000 UTC m=+1608.699628959"
Nov 26 22:48:00 crc kubenswrapper[4903]: I1126 22:48:00.037717 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2" podStartSLOduration=3.049418074 podStartE2EDuration="14.03753733s" podCreationTimestamp="2025-11-26 22:47:46 +0000 UTC" firstStartedPulling="2025-11-26 22:47:47.852068954 +0000 UTC m=+1596.542303864" lastFinishedPulling="2025-11-26 22:47:58.84018821 +0000 UTC m=+1607.530423120" observedRunningTime="2025-11-26 22:48:00.019011186 +0000 UTC m=+1608.709246096" watchObservedRunningTime="2025-11-26 22:48:00.03753733 +0000 UTC m=+1608.727772240"
Nov 26 22:48:00 crc kubenswrapper[4903]: I1126 22:48:00.068022 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=44.068001864 podStartE2EDuration="44.068001864s" podCreationTimestamp="2025-11-26 22:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 22:48:00.047913957 +0000 UTC m=+1608.738148887" watchObservedRunningTime="2025-11-26 22:48:00.068001864 +0000 UTC m=+1608.758236774"
Nov 26 22:48:01 crc kubenswrapper[4903]: I1126 22:48:01.910189 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.591386 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-njqgv"]
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.604800 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-njqgv"]
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.708451 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-cdw9p"]
Nov 26 22:48:04 crc kubenswrapper[4903]: E1126 22:48:04.709202 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerName="heat-cfnapi"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.709225 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerName="heat-cfnapi"
Nov 26 22:48:04 crc kubenswrapper[4903]: E1126 22:48:04.709262 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerName="heat-api"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.709273 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerName="heat-api"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.709601 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ee458b7-00d7-40e4-8c43-8c61e6fb87ae" containerName="heat-cfnapi"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.709630 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fb6a2cb-3364-4848-8eb4-a0cbc97df32a" containerName="heat-api"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.710729 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.712475 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.719739 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-cdw9p"]
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.794228 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q6dn\" (UniqueName: \"kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.794268 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.794350 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.794447 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.897084 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q6dn\" (UniqueName: \"kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.897157 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.897344 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.897551 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.905516 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.906362 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.921394 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:04 crc kubenswrapper[4903]: I1126 22:48:04.931967 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q6dn\" (UniqueName: \"kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn\") pod \"aodh-db-sync-cdw9p\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") " pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:05 crc kubenswrapper[4903]: I1126 22:48:05.037201 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:05 crc kubenswrapper[4903]: I1126 22:48:05.564561 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-cdw9p"]
Nov 26 22:48:06 crc kubenswrapper[4903]: I1126 22:48:06.047085 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45c66290-f98d-4c93-ac95-b63cd9e0777c" path="/var/lib/kubelet/pods/45c66290-f98d-4c93-ac95-b63cd9e0777c/volumes"
Nov 26 22:48:06 crc kubenswrapper[4903]: I1126 22:48:06.069662 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cdw9p" event={"ID":"1307be14-ef92-4ac2-97c5-2851e1871a45","Type":"ContainerStarted","Data":"05fbcfc920517306e74fe0f4b52f3ad60a64e0b4794f0a3c806277222b87648e"}
Nov 26 22:48:08 crc kubenswrapper[4903]: E1126 22:48:08.742763 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:48:08 crc kubenswrapper[4903]: E1126 22:48:08.745216 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:48:08 crc kubenswrapper[4903]: E1126 22:48:08.747938 4903 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 26 22:48:08 crc kubenswrapper[4903]: E1126 22:48:08.747981 4903 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6c48d846b-6vkx8" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine"
Nov 26 22:48:11 crc kubenswrapper[4903]: I1126 22:48:11.162617 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cdw9p" event={"ID":"1307be14-ef92-4ac2-97c5-2851e1871a45","Type":"ContainerStarted","Data":"f9f34976525967f8eee08097ab70f55494bcd7244ed8ad63d8b1bb870e46197d"}
Nov 26 22:48:11 crc kubenswrapper[4903]: I1126 22:48:11.171595 4903 generic.go:334] "Generic (PLEG): container finished" podID="154ea937-525e-406f-bef0-ffd2c360d7e1" containerID="8b2abdf0185bd16468859872e959b4d781d36c3a751a6218e4246adb31a197b1" exitCode=0
Nov 26 22:48:11 crc kubenswrapper[4903]: I1126 22:48:11.171745 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2" event={"ID":"154ea937-525e-406f-bef0-ffd2c360d7e1","Type":"ContainerDied","Data":"8b2abdf0185bd16468859872e959b4d781d36c3a751a6218e4246adb31a197b1"}
Nov 26 22:48:11 crc kubenswrapper[4903]: I1126 22:48:11.197870 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-cdw9p" podStartSLOduration=2.016250149 podStartE2EDuration="7.197847384s" podCreationTimestamp="2025-11-26 22:48:04 +0000 UTC" firstStartedPulling="2025-11-26 22:48:05.580653648 +0000 UTC m=+1614.270888568" lastFinishedPulling="2025-11-26 22:48:10.762250893 +0000 UTC m=+1619.452485803" observedRunningTime="2025-11-26 22:48:11.184661101 +0000 UTC m=+1619.874896091" watchObservedRunningTime="2025-11-26 22:48:11.197847384 +0000 UTC m=+1619.888082334"
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.787894 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.916955 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle\") pod \"154ea937-525e-406f-bef0-ffd2c360d7e1\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") "
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.917010 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory\") pod \"154ea937-525e-406f-bef0-ffd2c360d7e1\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") "
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.917092 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key\") pod \"154ea937-525e-406f-bef0-ffd2c360d7e1\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") "
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.917373 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvvpf\" (UniqueName: \"kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf\") pod \"154ea937-525e-406f-bef0-ffd2c360d7e1\" (UID: \"154ea937-525e-406f-bef0-ffd2c360d7e1\") "
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.923681 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf" (OuterVolumeSpecName: "kube-api-access-bvvpf") pod "154ea937-525e-406f-bef0-ffd2c360d7e1" (UID: "154ea937-525e-406f-bef0-ffd2c360d7e1"). InnerVolumeSpecName "kube-api-access-bvvpf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.942047 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "154ea937-525e-406f-bef0-ffd2c360d7e1" (UID: "154ea937-525e-406f-bef0-ffd2c360d7e1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.953415 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory" (OuterVolumeSpecName: "inventory") pod "154ea937-525e-406f-bef0-ffd2c360d7e1" (UID: "154ea937-525e-406f-bef0-ffd2c360d7e1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:12 crc kubenswrapper[4903]: I1126 22:48:12.967574 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "154ea937-525e-406f-bef0-ffd2c360d7e1" (UID: "154ea937-525e-406f-bef0-ffd2c360d7e1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.020404 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvvpf\" (UniqueName: \"kubernetes.io/projected/154ea937-525e-406f-bef0-ffd2c360d7e1-kube-api-access-bvvpf\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.020439 4903 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.020453 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.020466 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154ea937-525e-406f-bef0-ffd2c360d7e1-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.207855 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2" event={"ID":"154ea937-525e-406f-bef0-ffd2c360d7e1","Type":"ContainerDied","Data":"900ac3461f1988931644f70f979371cade6b5a6f7954e8968c0a7f69902c5ecf"}
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.207905 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="900ac3461f1988931644f70f979371cade6b5a6f7954e8968c0a7f69902c5ecf"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.207970 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.210192 4903 generic.go:334] "Generic (PLEG): container finished" podID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad" exitCode=0
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.210238 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c48d846b-6vkx8" event={"ID":"ec7ca594-63f3-463d-992d-75a2bec894dc","Type":"ContainerDied","Data":"99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad"}
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.312257 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"]
Nov 26 22:48:13 crc kubenswrapper[4903]: E1126 22:48:13.313075 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="154ea937-525e-406f-bef0-ffd2c360d7e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.313142 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="154ea937-525e-406f-bef0-ffd2c360d7e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.313459 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="154ea937-525e-406f-bef0-ffd2c360d7e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.314660 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.317999 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.318304 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.318485 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.322884 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"]
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.323050 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.451055 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzx76\" (UniqueName: \"kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.451162 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.451214 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.521003 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c48d846b-6vkx8"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.554565 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzx76\" (UniqueName: \"kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.554652 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.554713 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.585558 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.587715 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.610795 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzx76\" (UniqueName: \"kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-s8hcp\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.641860 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.656004 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpxrr\" (UniqueName: \"kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr\") pod \"ec7ca594-63f3-463d-992d-75a2bec894dc\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") "
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.656056 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle\") pod \"ec7ca594-63f3-463d-992d-75a2bec894dc\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") "
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.656414 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data\") pod \"ec7ca594-63f3-463d-992d-75a2bec894dc\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") "
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.656495 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom\") pod \"ec7ca594-63f3-463d-992d-75a2bec894dc\" (UID: \"ec7ca594-63f3-463d-992d-75a2bec894dc\") "
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.660760 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ec7ca594-63f3-463d-992d-75a2bec894dc" (UID: "ec7ca594-63f3-463d-992d-75a2bec894dc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.661662 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr" (OuterVolumeSpecName: "kube-api-access-bpxrr") pod "ec7ca594-63f3-463d-992d-75a2bec894dc" (UID: "ec7ca594-63f3-463d-992d-75a2bec894dc"). InnerVolumeSpecName "kube-api-access-bpxrr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.691644 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec7ca594-63f3-463d-992d-75a2bec894dc" (UID: "ec7ca594-63f3-463d-992d-75a2bec894dc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.738202 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data" (OuterVolumeSpecName: "config-data") pod "ec7ca594-63f3-463d-992d-75a2bec894dc" (UID: "ec7ca594-63f3-463d-992d-75a2bec894dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.759345 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.759376 4903 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.759387 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpxrr\" (UniqueName: \"kubernetes.io/projected/ec7ca594-63f3-463d-992d-75a2bec894dc-kube-api-access-bpxrr\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:13 crc kubenswrapper[4903]: I1126 22:48:13.759395 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec7ca594-63f3-463d-992d-75a2bec894dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.228884 4903 generic.go:334] "Generic (PLEG): container finished" podID="1307be14-ef92-4ac2-97c5-2851e1871a45" containerID="f9f34976525967f8eee08097ab70f55494bcd7244ed8ad63d8b1bb870e46197d" exitCode=0
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.229420 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cdw9p" event={"ID":"1307be14-ef92-4ac2-97c5-2851e1871a45","Type":"ContainerDied","Data":"f9f34976525967f8eee08097ab70f55494bcd7244ed8ad63d8b1bb870e46197d"}
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.233189 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c48d846b-6vkx8" event={"ID":"ec7ca594-63f3-463d-992d-75a2bec894dc","Type":"ContainerDied","Data":"aa8a5802ee424018214497d59b00733d1682229188c99f9641731526a3175e49"}
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.233281 4903 scope.go:117] "RemoveContainer" containerID="99ee967d559430be4daddde8eee86bd479913b4205fef6778bf5864ac6d7adad"
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.233477 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c48d846b-6vkx8"
Nov 26 22:48:14 crc kubenswrapper[4903]: W1126 22:48:14.297200 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4801ee8_4d4e_4459_8289_60e5db96a3b9.slice/crio-76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08 WatchSource:0}: Error finding container 76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08: Status 404 returned error can't find the container with id 76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.300158 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"]
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.311594 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6c48d846b-6vkx8"]
Nov 26 22:48:14 crc kubenswrapper[4903]: I1126 22:48:14.325903 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp"]
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.251434 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" event={"ID":"f4801ee8-4d4e-4459-8289-60e5db96a3b9","Type":"ContainerStarted","Data":"b2f56034b8e8c4d70fbe1c2ca9b8a0538c078e6d1ce2ba737abccc73b0e74fb3"}
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.251802 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" event={"ID":"f4801ee8-4d4e-4459-8289-60e5db96a3b9","Type":"ContainerStarted","Data":"76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08"}
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.270097 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" podStartSLOduration=1.73543175 podStartE2EDuration="2.270076066s" podCreationTimestamp="2025-11-26 22:48:13 +0000 UTC" firstStartedPulling="2025-11-26 22:48:14.300914559 +0000 UTC m=+1622.991149509" lastFinishedPulling="2025-11-26 22:48:14.835558875 +0000 UTC m=+1623.525793825" observedRunningTime="2025-11-26 22:48:15.267675073 +0000 UTC m=+1623.957910023" watchObservedRunningTime="2025-11-26 22:48:15.270076066 +0000 UTC m=+1623.960310986"
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.818339 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-cdw9p"
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.915023 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle\") pod \"1307be14-ef92-4ac2-97c5-2851e1871a45\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") "
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.915137 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts\") pod \"1307be14-ef92-4ac2-97c5-2851e1871a45\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") "
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.915247 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data\") pod \"1307be14-ef92-4ac2-97c5-2851e1871a45\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") "
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.915346 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9q6dn\" (UniqueName: \"kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn\") pod \"1307be14-ef92-4ac2-97c5-2851e1871a45\" (UID: \"1307be14-ef92-4ac2-97c5-2851e1871a45\") "
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.920729 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts" (OuterVolumeSpecName: "scripts") pod "1307be14-ef92-4ac2-97c5-2851e1871a45" (UID: "1307be14-ef92-4ac2-97c5-2851e1871a45"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.926859 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn" (OuterVolumeSpecName: "kube-api-access-9q6dn") pod "1307be14-ef92-4ac2-97c5-2851e1871a45" (UID: "1307be14-ef92-4ac2-97c5-2851e1871a45"). InnerVolumeSpecName "kube-api-access-9q6dn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.952329 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1307be14-ef92-4ac2-97c5-2851e1871a45" (UID: "1307be14-ef92-4ac2-97c5-2851e1871a45"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:48:15 crc kubenswrapper[4903]: I1126 22:48:15.952821 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data" (OuterVolumeSpecName: "config-data") pod "1307be14-ef92-4ac2-97c5-2851e1871a45" (UID: "1307be14-ef92-4ac2-97c5-2851e1871a45"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.019192 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.019293 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.019361 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1307be14-ef92-4ac2-97c5-2851e1871a45-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.019415 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9q6dn\" (UniqueName: \"kubernetes.io/projected/1307be14-ef92-4ac2-97c5-2851e1871a45-kube-api-access-9q6dn\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.041885 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" path="/var/lib/kubelet/pods/ec7ca594-63f3-463d-992d-75a2bec894dc/volumes" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.278596 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-cdw9p" event={"ID":"1307be14-ef92-4ac2-97c5-2851e1871a45","Type":"ContainerDied","Data":"05fbcfc920517306e74fe0f4b52f3ad60a64e0b4794f0a3c806277222b87648e"} Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.278653 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05fbcfc920517306e74fe0f4b52f3ad60a64e0b4794f0a3c806277222b87648e" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.278774 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-cdw9p" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.926912 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 22:48:16 crc kubenswrapper[4903]: I1126 22:48:16.943978 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.304337 4903 scope.go:117] "RemoveContainer" containerID="815cb31ccc71c0f5bf6c756f7f032647662cd9ec3d847d5e988f99acc6f0d477" Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.340577 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.340953 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-api" containerID="cri-o://07aa88fbd138008906bb8b408c2b6e39c86a742c7d941e733536fd097b8c9fc8" gracePeriod=30 Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.341277 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-notifier" containerID="cri-o://f626787be4ced39e2e9fba37b148c92cb7728b70e29552779650c4dd06e40a83" gracePeriod=30 Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.341411 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-evaluator" containerID="cri-o://838928e083bc7d4d767dd84268306d7317180ad54fab3966b563aebea4741396" gracePeriod=30 Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.341438 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-listener" containerID="cri-o://e153cc7373dc20867bbaa6f0803f381f4b712ec576adda56947e43428d81ed4d" gracePeriod=30 Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.363680 4903 scope.go:117] "RemoveContainer" containerID="df67350bf4d64374226400873f1e620459360fb823347769be1a77f45e46c776" Nov 26 22:48:17 crc kubenswrapper[4903]: I1126 22:48:17.396841 4903 scope.go:117] "RemoveContainer" containerID="d2bdfa7466b3f339611a3f8ba8cbb5f2122fb16dfe6599730212c78f3e1eae51" Nov 26 22:48:18 crc kubenswrapper[4903]: I1126 22:48:18.310135 4903 generic.go:334] "Generic (PLEG): container finished" podID="f4801ee8-4d4e-4459-8289-60e5db96a3b9" containerID="b2f56034b8e8c4d70fbe1c2ca9b8a0538c078e6d1ce2ba737abccc73b0e74fb3" exitCode=0 Nov 26 22:48:18 crc kubenswrapper[4903]: I1126 22:48:18.310268 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" event={"ID":"f4801ee8-4d4e-4459-8289-60e5db96a3b9","Type":"ContainerDied","Data":"b2f56034b8e8c4d70fbe1c2ca9b8a0538c078e6d1ce2ba737abccc73b0e74fb3"} Nov 26 22:48:18 crc kubenswrapper[4903]: I1126 22:48:18.315870 4903 generic.go:334] "Generic (PLEG): container finished" podID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerID="838928e083bc7d4d767dd84268306d7317180ad54fab3966b563aebea4741396" exitCode=0 Nov 26 22:48:18 crc kubenswrapper[4903]: I1126 22:48:18.315916 4903 generic.go:334] "Generic (PLEG): container finished" podID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerID="07aa88fbd138008906bb8b408c2b6e39c86a742c7d941e733536fd097b8c9fc8" exitCode=0 Nov 26 22:48:18 
crc kubenswrapper[4903]: I1126 22:48:18.315983 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerDied","Data":"838928e083bc7d4d767dd84268306d7317180ad54fab3966b563aebea4741396"} Nov 26 22:48:18 crc kubenswrapper[4903]: I1126 22:48:18.316065 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerDied","Data":"07aa88fbd138008906bb8b408c2b6e39c86a742c7d941e733536fd097b8c9fc8"} Nov 26 22:48:19 crc kubenswrapper[4903]: I1126 22:48:19.911419 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.026040 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key\") pod \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.026149 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzx76\" (UniqueName: \"kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76\") pod \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.026178 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory\") pod \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\" (UID: \"f4801ee8-4d4e-4459-8289-60e5db96a3b9\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.057205 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76" (OuterVolumeSpecName: "kube-api-access-rzx76") pod "f4801ee8-4d4e-4459-8289-60e5db96a3b9" (UID: "f4801ee8-4d4e-4459-8289-60e5db96a3b9"). InnerVolumeSpecName "kube-api-access-rzx76". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.089959 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory" (OuterVolumeSpecName: "inventory") pod "f4801ee8-4d4e-4459-8289-60e5db96a3b9" (UID: "f4801ee8-4d4e-4459-8289-60e5db96a3b9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.121480 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f4801ee8-4d4e-4459-8289-60e5db96a3b9" (UID: "f4801ee8-4d4e-4459-8289-60e5db96a3b9"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.129318 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.129357 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzx76\" (UniqueName: \"kubernetes.io/projected/f4801ee8-4d4e-4459-8289-60e5db96a3b9-kube-api-access-rzx76\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.129375 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f4801ee8-4d4e-4459-8289-60e5db96a3b9-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.356135 4903 generic.go:334] "Generic (PLEG): container finished" podID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerID="e153cc7373dc20867bbaa6f0803f381f4b712ec576adda56947e43428d81ed4d" exitCode=0 Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.356168 4903 generic.go:334] "Generic (PLEG): container finished" podID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerID="f626787be4ced39e2e9fba37b148c92cb7728b70e29552779650c4dd06e40a83" exitCode=0 Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.356250 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerDied","Data":"e153cc7373dc20867bbaa6f0803f381f4b712ec576adda56947e43428d81ed4d"} Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.356284 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerDied","Data":"f626787be4ced39e2e9fba37b148c92cb7728b70e29552779650c4dd06e40a83"} Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.365078 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" event={"ID":"f4801ee8-4d4e-4459-8289-60e5db96a3b9","Type":"ContainerDied","Data":"76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08"} Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.365130 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76ac3e1174a448d6719af0f4a98bcc6c0b4fd07d2552f1e98facece561ab1c08" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.365188 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-s8hcp" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.433672 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2"] Nov 26 22:48:20 crc kubenswrapper[4903]: E1126 22:48:20.434322 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4801ee8-4d4e-4459-8289-60e5db96a3b9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434349 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4801ee8-4d4e-4459-8289-60e5db96a3b9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 22:48:20 crc kubenswrapper[4903]: E1126 22:48:20.434379 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1307be14-ef92-4ac2-97c5-2851e1871a45" containerName="aodh-db-sync" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434388 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1307be14-ef92-4ac2-97c5-2851e1871a45" containerName="aodh-db-sync" Nov 26 22:48:20 crc kubenswrapper[4903]: E1126 22:48:20.434440 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434449 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434837 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec7ca594-63f3-463d-992d-75a2bec894dc" containerName="heat-engine" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434869 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1307be14-ef92-4ac2-97c5-2851e1871a45" containerName="aodh-db-sync" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.434894 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4801ee8-4d4e-4459-8289-60e5db96a3b9" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.435924 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.439918 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.440012 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.440130 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.440263 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.451570 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2"] Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.540448 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72vnq\" (UniqueName: \"kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.540585 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.540657 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.540995 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.542393 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.642995 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.643164 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfbt5\" (UniqueName: \"kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.643331 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.643356 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.643386 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.643408 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs\") pod \"83b4acbb-6ade-4bd9-9506-4a0a95829480\" (UID: \"83b4acbb-6ade-4bd9-9506-4a0a95829480\") " Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.644030 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.644138 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72vnq\" (UniqueName: \"kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.644198 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 
22:48:20.644253 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.649894 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.652570 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.657222 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.661805 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5" (OuterVolumeSpecName: "kube-api-access-dfbt5") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "kube-api-access-dfbt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.663805 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts" (OuterVolumeSpecName: "scripts") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.667380 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72vnq\" (UniqueName: \"kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.728003 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.739944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.747085 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfbt5\" (UniqueName: \"kubernetes.io/projected/83b4acbb-6ade-4bd9-9506-4a0a95829480-kube-api-access-dfbt5\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.747118 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.747132 4903 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.747144 4903 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.761919 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.824082 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data" (OuterVolumeSpecName: "config-data") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.827873 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83b4acbb-6ade-4bd9-9506-4a0a95829480" (UID: "83b4acbb-6ade-4bd9-9506-4a0a95829480"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.851348 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:20 crc kubenswrapper[4903]: I1126 22:48:20.851388 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83b4acbb-6ade-4bd9-9506-4a0a95829480-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.396962 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"83b4acbb-6ade-4bd9-9506-4a0a95829480","Type":"ContainerDied","Data":"5af0ab450328c72f31506a298fedfb7bd933b988cc10c84abfd5f705ecb72309"} Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.397037 4903 scope.go:117] "RemoveContainer" containerID="e153cc7373dc20867bbaa6f0803f381f4b712ec576adda56947e43428d81ed4d" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.397071 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.458864 4903 scope.go:117] "RemoveContainer" containerID="f626787be4ced39e2e9fba37b148c92cb7728b70e29552779650c4dd06e40a83" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.469974 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.487843 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.529809 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2"] Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.556331 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:21 crc kubenswrapper[4903]: E1126 22:48:21.556844 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-notifier" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.556856 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-notifier" Nov 26 22:48:21 crc kubenswrapper[4903]: E1126 22:48:21.556876 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-api" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.556882 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-api" Nov 26 22:48:21 crc kubenswrapper[4903]: E1126 22:48:21.556906 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-listener" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.556912 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-listener" Nov 26 22:48:21 crc kubenswrapper[4903]: E1126 22:48:21.556934 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-evaluator" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.556940 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" 
containerName="aodh-evaluator" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.557136 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-api" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.557150 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-notifier" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.557173 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-evaluator" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.557182 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" containerName="aodh-listener" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.559201 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.562262 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pkbb4" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.562468 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.562656 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.562870 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.563104 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.568983 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.576225 4903 scope.go:117] "RemoveContainer" containerID="838928e083bc7d4d767dd84268306d7317180ad54fab3966b563aebea4741396" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.605428 4903 scope.go:117] "RemoveContainer" containerID="07aa88fbd138008906bb8b408c2b6e39c86a742c7d941e733536fd097b8c9fc8" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.676497 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-scripts\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.676600 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-config-data\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.676688 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crk8d\" (UniqueName: \"kubernetes.io/projected/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-kube-api-access-crk8d\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.676893 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.677015 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-public-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.677069 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-internal-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779124 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-public-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779189 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-internal-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779355 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-scripts\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779403 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-config-data\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779451 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crk8d\" (UniqueName: \"kubernetes.io/projected/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-kube-api-access-crk8d\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.779488 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.785831 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-internal-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.786606 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.786826 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-config-data\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.786893 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-public-tls-certs\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.786899 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-scripts\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.798319 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crk8d\" (UniqueName: \"kubernetes.io/projected/e8d0ddee-85d8-40d5-9cfc-d279c65aa4be-kube-api-access-crk8d\") pod \"aodh-0\" (UID: \"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be\") " pod="openstack/aodh-0" Nov 26 22:48:21 crc kubenswrapper[4903]: I1126 22:48:21.893320 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 22:48:22 crc kubenswrapper[4903]: I1126 22:48:22.065241 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83b4acbb-6ade-4bd9-9506-4a0a95829480" path="/var/lib/kubelet/pods/83b4acbb-6ade-4bd9-9506-4a0a95829480/volumes" Nov 26 22:48:22 crc kubenswrapper[4903]: I1126 22:48:22.409108 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" event={"ID":"e9c5ea47-6ef3-44d4-b710-d11a2367448e","Type":"ContainerStarted","Data":"67a6cd715a1164722d3acfadbd900fc60dcbde3e8b219e3a88efb4fb62ec3711"} Nov 26 22:48:22 crc kubenswrapper[4903]: I1126 22:48:22.409369 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" event={"ID":"e9c5ea47-6ef3-44d4-b710-d11a2367448e","Type":"ContainerStarted","Data":"4b3131d5d9a58592caac18f64d7d1354a22b4941806a7f89579eb6c19ea7f7b9"} Nov 26 22:48:22 crc kubenswrapper[4903]: I1126 22:48:22.423025 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 22:48:22 crc kubenswrapper[4903]: I1126 22:48:22.431329 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" podStartSLOduration=1.8282951889999999 podStartE2EDuration="2.43131427s" podCreationTimestamp="2025-11-26 22:48:20 +0000 UTC" firstStartedPulling="2025-11-26 22:48:21.485463955 +0000 UTC m=+1630.175698855" lastFinishedPulling="2025-11-26 22:48:22.088483026 +0000 UTC m=+1630.778717936" observedRunningTime="2025-11-26 22:48:22.430598651 +0000 UTC m=+1631.120833571" watchObservedRunningTime="2025-11-26 22:48:22.43131427 +0000 UTC m=+1631.121549180" Nov 26 22:48:23 crc kubenswrapper[4903]: I1126 22:48:23.423752 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be","Type":"ContainerStarted","Data":"dabdcd51d3e66075089153369383af297a619d9f78c5cc408c69cb63d7422fec"} Nov 26 22:48:23 crc kubenswrapper[4903]: I1126 22:48:23.424983 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be","Type":"ContainerStarted","Data":"bc47190fcccea0f4c959cd22619fa723a4fc4d81a6c61abdb67cac8c95351d81"} Nov 26 22:48:24 crc kubenswrapper[4903]: I1126 22:48:24.438152 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be","Type":"ContainerStarted","Data":"c95aad08bf53311534419e8d23b750436481c2fa60f34a262237806a02756606"} Nov 26 22:48:25 crc kubenswrapper[4903]: I1126 22:48:25.467555 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be","Type":"ContainerStarted","Data":"55e1efaafdf5bd3d4ae1e7a478ddc7ff9dcb500486457a9341d23a6160e10c34"} Nov 26 22:48:26 crc kubenswrapper[4903]: I1126 22:48:26.500567 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e8d0ddee-85d8-40d5-9cfc-d279c65aa4be","Type":"ContainerStarted","Data":"e0845d394342920c032993919e8db0b0a7ad4db4471e2cb1779c18743b520fd8"} Nov 26 22:48:26 crc kubenswrapper[4903]: I1126 22:48:26.530019 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.871641388 podStartE2EDuration="5.53000403s" podCreationTimestamp="2025-11-26 22:48:21 +0000 UTC" firstStartedPulling="2025-11-26 22:48:22.417165383 +0000 UTC m=+1631.107400283" lastFinishedPulling="2025-11-26 22:48:26.075528025 +0000 UTC m=+1634.765762925" observedRunningTime="2025-11-26 22:48:26.525397947 +0000 UTC m=+1635.215632857" watchObservedRunningTime="2025-11-26 22:48:26.53000403 +0000 UTC m=+1635.220238940" Nov 26 22:48:31 crc kubenswrapper[4903]: I1126 22:48:31.981745 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:48:31 crc kubenswrapper[4903]: I1126 22:48:31.982443 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:49:01 crc kubenswrapper[4903]: I1126 22:49:01.981898 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:49:01 crc kubenswrapper[4903]: I1126 22:49:01.983849 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.652114 4903 scope.go:117] "RemoveContainer" 
containerID="180065f48eebcb22ee86fd9aefe971cd4dc6d64ab8679491f522210e3458e6be" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.690400 4903 scope.go:117] "RemoveContainer" containerID="ec4c452e74e3df9cf0bc6186413c8c13d2158912e0fc840fc06d34b1a2813d41" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.736755 4903 scope.go:117] "RemoveContainer" containerID="e264e63eabdb973cf35d856c39ff29b7250aaaecbd7cb02474dd78e0b2ad41a7" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.777288 4903 scope.go:117] "RemoveContainer" containerID="860e8d94bfb9963660da2caaf575ea05f7444de561aa082ad9dd084bd0272218" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.816585 4903 scope.go:117] "RemoveContainer" containerID="f2453b02c94fc71e33249f8f1c2f944ea16e1c243f91b038cc4c3a29e398ab7a" Nov 26 22:49:17 crc kubenswrapper[4903]: I1126 22:49:17.868857 4903 scope.go:117] "RemoveContainer" containerID="fff991dc19fb76a695f02d9175f28c9eee9b48598401b45ed8731f57bf9d7bdb" Nov 26 22:49:31 crc kubenswrapper[4903]: I1126 22:49:31.981336 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:49:31 crc kubenswrapper[4903]: I1126 22:49:31.982091 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:49:31 crc kubenswrapper[4903]: I1126 22:49:31.982162 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:49:31 crc kubenswrapper[4903]: I1126 22:49:31.983568 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:49:31 crc kubenswrapper[4903]: I1126 22:49:31.983675 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" gracePeriod=600 Nov 26 22:49:32 crc kubenswrapper[4903]: E1126 22:49:32.120831 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 22:49:32 crc kubenswrapper[4903]: I1126 22:49:32.612850 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" exitCode=0 Nov 26 22:49:32 crc kubenswrapper[4903]: I1126 
22:49:32.612903 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"} Nov 26 22:49:32 crc kubenswrapper[4903]: I1126 22:49:32.612950 4903 scope.go:117] "RemoveContainer" containerID="a7d63365977e5a796bff719bf93bb8deb37153f9e84b6763869530932a4e1b36" Nov 26 22:49:32 crc kubenswrapper[4903]: I1126 22:49:32.613781 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" Nov 26 22:49:32 crc kubenswrapper[4903]: E1126 22:49:32.614221 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 22:49:44 crc kubenswrapper[4903]: I1126 22:49:44.030079 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" Nov 26 22:49:44 crc kubenswrapper[4903]: E1126 22:49:44.031954 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 22:49:58 crc kubenswrapper[4903]: I1126 22:49:58.029338 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" Nov 26 22:49:58 crc kubenswrapper[4903]: E1126 22:49:58.030891 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 22:50:10 crc kubenswrapper[4903]: I1126 22:50:10.029127 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" Nov 26 22:50:10 crc kubenswrapper[4903]: E1126 22:50:10.029994 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 22:50:18 crc kubenswrapper[4903]: I1126 22:50:18.069866 4903 scope.go:117] "RemoveContainer" containerID="df5fd9053af2144ab644d9cc231421b4dcd15c4c6fe9470436291ae62e134857" Nov 26 22:50:18 crc kubenswrapper[4903]: I1126 22:50:18.128011 4903 scope.go:117] "RemoveContainer" containerID="4dc2b6467314c282d106d8006f59bdb9a904b3c666d77a0350bad9cc7de820c1" Nov 26 22:50:18 crc kubenswrapper[4903]: 
I1126 22:50:18.221505 4903 scope.go:117] "RemoveContainer" containerID="3b8719ae15f3ee09a83474358e9125d061ea8ead25fecc0051331bfdb2122077"
Nov 26 22:50:21 crc kubenswrapper[4903]: I1126 22:50:21.030054 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:50:21 crc kubenswrapper[4903]: E1126 22:50:21.031004 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:50:32 crc kubenswrapper[4903]: I1126 22:50:32.042071 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:50:32 crc kubenswrapper[4903]: E1126 22:50:32.045405 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:50:44 crc kubenswrapper[4903]: I1126 22:50:44.029719 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:50:44 crc kubenswrapper[4903]: E1126 22:50:44.030753 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:50:57 crc kubenswrapper[4903]: I1126 22:50:57.028480 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:50:57 crc kubenswrapper[4903]: E1126 22:50:57.029586 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.078392 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-d34e-account-create-update-2z8vz"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.097554 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-df57-account-create-update-7g5m8"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.111918 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-829mx"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.124795 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-829mx"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.134604 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-df57-account-create-update-7g5m8"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.144117 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-zcdbt"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.154660 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-d34e-account-create-update-2z8vz"]
Nov 26 22:51:02 crc kubenswrapper[4903]: I1126 22:51:02.163920 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-zcdbt"]
Nov 26 22:51:04 crc kubenswrapper[4903]: I1126 22:51:04.049274 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29ec5ffe-92c2-4f30-8477-913d14b49415" path="/var/lib/kubelet/pods/29ec5ffe-92c2-4f30-8477-913d14b49415/volumes"
Nov 26 22:51:04 crc kubenswrapper[4903]: I1126 22:51:04.051002 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f687978-0ef1-4061-98ed-a8684824ece8" path="/var/lib/kubelet/pods/4f687978-0ef1-4061-98ed-a8684824ece8/volumes"
Nov 26 22:51:04 crc kubenswrapper[4903]: I1126 22:51:04.052557 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84562f82-5408-4031-be94-2933a87dd5b0" path="/var/lib/kubelet/pods/84562f82-5408-4031-be94-2933a87dd5b0/volumes"
Nov 26 22:51:04 crc kubenswrapper[4903]: I1126 22:51:04.053722 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1a6da44-50e3-4b0d-8062-aafa8b65aaaf" path="/var/lib/kubelet/pods/b1a6da44-50e3-4b0d-8062-aafa8b65aaaf/volumes"
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.063365 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-19c9-account-create-update-fkslg"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.082961 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-78f3-account-create-update-rtnpt"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.097850 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-pd66t"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.110168 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-78f3-account-create-update-rtnpt"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.120606 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-pd66t"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.131976 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-w8mjj"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.143710 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-19c9-account-create-update-fkslg"]
Nov 26 22:51:07 crc kubenswrapper[4903]: I1126 22:51:07.155704 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-w8mjj"]
Nov 26 22:51:08 crc kubenswrapper[4903]: I1126 22:51:08.031079 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:51:08 crc kubenswrapper[4903]: E1126 22:51:08.031775 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:51:08 crc kubenswrapper[4903]: I1126 22:51:08.053544 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fcf8863-3ab3-4617-9d3e-41256bf1c3de" path="/var/lib/kubelet/pods/5fcf8863-3ab3-4617-9d3e-41256bf1c3de/volumes"
Nov 26 22:51:08 crc kubenswrapper[4903]: I1126 22:51:08.054687 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7208d6af-3e83-4569-b654-20c73205bab5" path="/var/lib/kubelet/pods/7208d6af-3e83-4569-b654-20c73205bab5/volumes"
Nov 26 22:51:08 crc kubenswrapper[4903]: I1126 22:51:08.058620 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcf866d6-b692-4e5c-bdc0-cf6569ecd016" path="/var/lib/kubelet/pods/bcf866d6-b692-4e5c-bdc0-cf6569ecd016/volumes"
Nov 26 22:51:08 crc kubenswrapper[4903]: I1126 22:51:08.060908 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216" path="/var/lib/kubelet/pods/f0fac4ab-0a78-48f2-8a13-cc5c7bc7c216/volumes"
Nov 26 22:51:12 crc kubenswrapper[4903]: I1126 22:51:12.070676 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-pbphv"]
Nov 26 22:51:12 crc kubenswrapper[4903]: I1126 22:51:12.101403 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-f978-account-create-update-cxc2n"]
Nov 26 22:51:12 crc kubenswrapper[4903]: I1126 22:51:12.119531 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-f978-account-create-update-cxc2n"]
Nov 26 22:51:12 crc kubenswrapper[4903]: I1126 22:51:12.132007 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-pbphv"]
Nov 26 22:51:14 crc kubenswrapper[4903]: I1126 22:51:14.040275 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ebba586-8f3a-4148-aec8-687eb566f1b0" path="/var/lib/kubelet/pods/7ebba586-8f3a-4148-aec8-687eb566f1b0/volumes"
Nov 26 22:51:14 crc kubenswrapper[4903]: I1126 22:51:14.042579 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7711917-d475-44bb-8393-a803e720f32d" path="/var/lib/kubelet/pods/f7711917-d475-44bb-8393-a803e720f32d/volumes"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.404310 4903 scope.go:117] "RemoveContainer" containerID="50ae9f11d4069bdbd321bdedb9a5c1fa3807819bb9dbd8caa02117451dd0722a"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.458825 4903 scope.go:117] "RemoveContainer" containerID="7caf44671443d54e66db2aff9ab1f003babdf73860d4f8082d9d6c3b88ef1fef"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.532382 4903 scope.go:117] "RemoveContainer" containerID="6ac700e941269c1124b9947be2da1a22d2f4a7cf2f140310bc84e7ae6940291c"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.631874 4903 scope.go:117] "RemoveContainer" containerID="00986bc0d77af6ad74e6112ba9111364d0b84ad7b86d1e2512afc5825e888bdc"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.741751 4903 scope.go:117] "RemoveContainer" containerID="1cf6b0a259ad9abc4cf723013d2f0777e47205e3bcc59ff92232da8e141ec567"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.790202 4903 scope.go:117] "RemoveContainer" containerID="72edf5984fe7e080343e5df752f8e71ab00dd55d4bbbb6b52144f5a43c63615c"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.847785 4903 scope.go:117] "RemoveContainer" containerID="7dc4941a306483ee82b589f5308302beff0c8df2896d9f429220f03853ced321"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.878961 4903 scope.go:117] "RemoveContainer" containerID="81ff826347c0a76aa20e2da23dd58e1ec3ea3df1605e8184063e786d61c5a714"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.910415 4903 scope.go:117] "RemoveContainer" containerID="0ec011bf2c8ccdeb94b40891e819aa71331730ba9b78bad633720f4653f378ea"
Nov 26 22:51:18 crc kubenswrapper[4903]: I1126 22:51:18.958733 4903 scope.go:117] "RemoveContainer" containerID="d1867e44d3619f4f752d2bb02a07c0a0fd935e0df9c9fffa693e4e6c2fce3903"
Nov 26 22:51:21 crc kubenswrapper[4903]: I1126 22:51:21.029142 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:51:21 crc kubenswrapper[4903]: E1126 22:51:21.029830 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:51:31 crc kubenswrapper[4903]: I1126 22:51:31.054090 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-s2ggb"]
Nov 26 22:51:31 crc kubenswrapper[4903]: I1126 22:51:31.108987 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-s2ggb"]
Nov 26 22:51:31 crc kubenswrapper[4903]: I1126 22:51:31.411026 4903 generic.go:334] "Generic (PLEG): container finished" podID="e9c5ea47-6ef3-44d4-b710-d11a2367448e" containerID="67a6cd715a1164722d3acfadbd900fc60dcbde3e8b219e3a88efb4fb62ec3711" exitCode=0
Nov 26 22:51:31 crc kubenswrapper[4903]: I1126 22:51:31.411072 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" event={"ID":"e9c5ea47-6ef3-44d4-b710-d11a2367448e","Type":"ContainerDied","Data":"67a6cd715a1164722d3acfadbd900fc60dcbde3e8b219e3a88efb4fb62ec3711"}
Nov 26 22:51:32 crc kubenswrapper[4903]: I1126 22:51:32.047058 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd" path="/var/lib/kubelet/pods/7a0dbb6e-0534-4e68-9f85-5ed52bb6a6fd/volumes"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.028826 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:51:33 crc kubenswrapper[4903]: E1126 22:51:33.029561 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.030838 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.133610 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle\") pod \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") "
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.133770 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key\") pod \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") "
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.133855 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72vnq\" (UniqueName: \"kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq\") pod \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") "
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.134003 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory\") pod \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\" (UID: \"e9c5ea47-6ef3-44d4-b710-d11a2367448e\") "
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.143180 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "e9c5ea47-6ef3-44d4-b710-d11a2367448e" (UID: "e9c5ea47-6ef3-44d4-b710-d11a2367448e"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.143559 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq" (OuterVolumeSpecName: "kube-api-access-72vnq") pod "e9c5ea47-6ef3-44d4-b710-d11a2367448e" (UID: "e9c5ea47-6ef3-44d4-b710-d11a2367448e"). InnerVolumeSpecName "kube-api-access-72vnq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.176855 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory" (OuterVolumeSpecName: "inventory") pod "e9c5ea47-6ef3-44d4-b710-d11a2367448e" (UID: "e9c5ea47-6ef3-44d4-b710-d11a2367448e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.203298 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e9c5ea47-6ef3-44d4-b710-d11a2367448e" (UID: "e9c5ea47-6ef3-44d4-b710-d11a2367448e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.240176 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.240323 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72vnq\" (UniqueName: \"kubernetes.io/projected/e9c5ea47-6ef3-44d4-b710-d11a2367448e-kube-api-access-72vnq\") on node \"crc\" DevicePath \"\""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.240355 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.240377 4903 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9c5ea47-6ef3-44d4-b710-d11a2367448e-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.438923 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2" event={"ID":"e9c5ea47-6ef3-44d4-b710-d11a2367448e","Type":"ContainerDied","Data":"4b3131d5d9a58592caac18f64d7d1354a22b4941806a7f89579eb6c19ea7f7b9"}
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.438984 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b3131d5d9a58592caac18f64d7d1354a22b4941806a7f89579eb6c19ea7f7b9"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.439005 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.542952 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"]
Nov 26 22:51:33 crc kubenswrapper[4903]: E1126 22:51:33.544129 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9c5ea47-6ef3-44d4-b710-d11a2367448e" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.544164 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9c5ea47-6ef3-44d4-b710-d11a2367448e" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.544641 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9c5ea47-6ef3-44d4-b710-d11a2367448e" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.546175 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.548958 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.552249 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.552266 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.552574 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.557729 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"]
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.650133 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.650293 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.650534 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7hkp\" (UniqueName: \"kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.753546 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.753774 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7hkp\" (UniqueName: \"kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.753890 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.759792 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.760620 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.771407 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7hkp\" (UniqueName: \"kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-k665c\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:33 crc kubenswrapper[4903]: I1126 22:51:33.868611 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:51:34 crc kubenswrapper[4903]: I1126 22:51:34.075769 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-020d-account-create-update-ll6gr"]
Nov 26 22:51:34 crc kubenswrapper[4903]: I1126 22:51:34.099423 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-020d-account-create-update-ll6gr"]
Nov 26 22:51:34 crc kubenswrapper[4903]: I1126 22:51:34.591000 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.045163 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-6x6mf"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.072589 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-4fb4-account-create-update-gwgf9"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.085161 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-6x6mf"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.097362 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-4fb4-account-create-update-gwgf9"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.107793 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f7fd-account-create-update-m5lqr"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.118716 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2908-account-create-update-dhdsh"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.130517 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-ll4x9"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.140516 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2908-account-create-update-dhdsh"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.150860 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-ll4x9"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.162068 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-f7fd-account-create-update-m5lqr"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.174416 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-m6b9x"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.186108 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-m6b9x"]
Nov 26 22:51:35 crc kubenswrapper[4903]: I1126 22:51:35.484470 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c" event={"ID":"8115e93a-72c0-4022-a687-6b58fb3c45ab","Type":"ContainerStarted","Data":"88de2153e9d4f7aeda030fafe0e8d85028774f185ddf56fb42582d22fec80d66"}
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.040286 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="120af257-f4df-4cb8-ab06-baa9eaaab9b6" path="/var/lib/kubelet/pods/120af257-f4df-4cb8-ab06-baa9eaaab9b6/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.042749 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b33485e-2d65-4308-aee1-eb14a019f91f" path="/var/lib/kubelet/pods/2b33485e-2d65-4308-aee1-eb14a019f91f/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.045269 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41b40a6f-4842-4a24-8cff-cf57fd96bfdd" path="/var/lib/kubelet/pods/41b40a6f-4842-4a24-8cff-cf57fd96bfdd/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.045883 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55b6735a-7c30-4cf1-86a9-d61e408ee84d" path="/var/lib/kubelet/pods/55b6735a-7c30-4cf1-86a9-d61e408ee84d/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.046440 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66d689d6-dee8-4ed9-a354-343757962010" path="/var/lib/kubelet/pods/66d689d6-dee8-4ed9-a354-343757962010/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.047605 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a15d74f-ad69-40ad-a811-3de51ff0f4e9" path="/var/lib/kubelet/pods/7a15d74f-ad69-40ad-a811-3de51ff0f4e9/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.048199 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4f63771-5b16-4801-b549-f51085e05d23" path="/var/lib/kubelet/pods/f4f63771-5b16-4801-b549-f51085e05d23/volumes"
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.499293 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c" event={"ID":"8115e93a-72c0-4022-a687-6b58fb3c45ab","Type":"ContainerStarted","Data":"dd51a2f4fc1fa500d989a74f006cbbdd920db91fa2c85e141baa7bd03c5666e1"}
Nov 26 22:51:36 crc kubenswrapper[4903]: I1126 22:51:36.542133 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c" podStartSLOduration=2.827657492 podStartE2EDuration="3.542108983s" podCreationTimestamp="2025-11-26 22:51:33 +0000 UTC" firstStartedPulling="2025-11-26 22:51:34.580579327 +0000 UTC m=+1823.270814247" lastFinishedPulling="2025-11-26 22:51:35.295030828 +0000 UTC m=+1823.985265738" observedRunningTime="2025-11-26 22:51:36.525428108 +0000 UTC m=+1825.215663028" watchObservedRunningTime="2025-11-26 22:51:36.542108983 +0000 UTC m=+1825.232343903"
Nov 26 22:51:43 crc kubenswrapper[4903]: I1126 22:51:43.068226 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-tl7bs"]
Nov 26 22:51:43 crc kubenswrapper[4903]: I1126 22:51:43.081062 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-tl7bs"]
Nov 26 22:51:44 crc kubenswrapper[4903]: I1126 22:51:44.052326 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfa72389-fa99-4e21-95c4-ca6a19783753" path="/var/lib/kubelet/pods/bfa72389-fa99-4e21-95c4-ca6a19783753/volumes"
Nov 26 22:51:48 crc kubenswrapper[4903]: I1126 22:51:48.029683 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:51:48 crc kubenswrapper[4903]: E1126 22:51:48.030423 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:52:01 crc kubenswrapper[4903]: I1126 22:52:01.043756 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-tz77l"]
Nov 26 22:52:01 crc kubenswrapper[4903]: I1126 22:52:01.060040 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-tz77l"]
Nov 26 22:52:02 crc kubenswrapper[4903]: I1126 22:52:02.038927 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:52:02 crc kubenswrapper[4903]: E1126 22:52:02.039477 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:52:02 crc kubenswrapper[4903]: I1126 22:52:02.046454 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2388445a-1656-41aa-8daa-a120993c24ad" path="/var/lib/kubelet/pods/2388445a-1656-41aa-8daa-a120993c24ad/volumes"
Nov 26 22:52:12 crc kubenswrapper[4903]: I1126 22:52:12.081648 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-fv6wn"]
Nov 26 22:52:12 crc kubenswrapper[4903]: I1126 22:52:12.121937 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-fv6wn"]
Nov 26 22:52:14 crc kubenswrapper[4903]: I1126 22:52:14.050424 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e30967f0-d295-4017-a586-5b1afdbcd625" path="/var/lib/kubelet/pods/e30967f0-d295-4017-a586-5b1afdbcd625/volumes"
Nov 26 22:52:17 crc kubenswrapper[4903]: I1126 22:52:17.029682 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:52:17 crc kubenswrapper[4903]: E1126 22:52:17.033037 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.265577 4903 scope.go:117] "RemoveContainer" containerID="473a4cea01238ffc41e1b8232d0b8584e58cde61ee94ea756646bcde3efead1c"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.302860 4903 scope.go:117] "RemoveContainer" containerID="ea263a427bb296fe4e58bbadcd1a5364cf51a85a387f886157cb91052f292f0f"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.412899 4903 scope.go:117] "RemoveContainer" containerID="08c1c908848efb6fe54928f091e9fc6bc99f387b7e4f214ed95ed5e31db1b743"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.446088 4903 scope.go:117] "RemoveContainer" containerID="937419d1cfa9dfaaf13f9e87289734539342fe8a3e29b1c2684a29052ff90a6b"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.489554 4903 scope.go:117] "RemoveContainer" containerID="eb3517d9cda792ee8700678cc808637d2da9a8b9a196fce3c2293d2a72c7ea99"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.542056 4903 scope.go:117] "RemoveContainer" containerID="35d0786e86e60a2ab07701ebf120fb73fd9e1f1698cd3c3ad03a5ebe9e546312"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.583131 4903 scope.go:117] "RemoveContainer" containerID="95bd9933a77da21c07c5b067b65641174939d8e3854d10c964a5e63ed44bb7a0"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.607540 4903 scope.go:117] "RemoveContainer" containerID="22ba27d2a1d072ac91ddf35b5ee76a0cde90ab56360b4fa63d07560ce01e7b44"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.628477 4903 scope.go:117] "RemoveContainer" containerID="1a4c90f3dfd5e0cc46c98e62069ed051a120dbf3962c91a1f4f11c59a33a62cf"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.652373 4903 scope.go:117] "RemoveContainer" containerID="40fdf811f5bdfaf929daa3191de012ceb5d946f6491852ad2e0b1a7a7ddf68c2"
Nov 26 22:52:19 crc kubenswrapper[4903]: I1126 22:52:19.683072 4903 scope.go:117] "RemoveContainer" containerID="2c4bf1480b174afe7951f003504b452575df913d2c0f2dbe00a7551777d82b08"
Nov 26 22:52:23 crc kubenswrapper[4903]: I1126 22:52:23.043621 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-xhf2r"]
Nov 26 22:52:23 crc kubenswrapper[4903]: I1126 22:52:23.055062 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-xhf2r"]
Nov 26 22:52:24 crc kubenswrapper[4903]: I1126 22:52:24.058584 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97a5a56e-9f78-4cc6-9299-ebe193cad354" path="/var/lib/kubelet/pods/97a5a56e-9f78-4cc6-9299-ebe193cad354/volumes"
Nov 26 22:52:24 crc kubenswrapper[4903]: I1126 22:52:24.060467 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lhsbl"]
Nov 26 22:52:24 crc kubenswrapper[4903]: I1126 22:52:24.073937 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lhsbl"]
Nov 26 22:52:25 crc kubenswrapper[4903]: I1126 22:52:25.038871 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-w6n8h"]
Nov 26 22:52:25 crc kubenswrapper[4903]: I1126 22:52:25.059296 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-w6n8h"]
Nov 26 22:52:26 crc kubenswrapper[4903]: I1126 22:52:26.047936 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b33bddbf-64ec-40c6-a7ea-5919c5a1042d" path="/var/lib/kubelet/pods/b33bddbf-64ec-40c6-a7ea-5919c5a1042d/volumes"
Nov 26 22:52:26 crc kubenswrapper[4903]: I1126 22:52:26.049938 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f42951d5-40b8-4f39-8a87-5f7e5809bf87" path="/var/lib/kubelet/pods/f42951d5-40b8-4f39-8a87-5f7e5809bf87/volumes"
Nov 26 22:52:29 crc kubenswrapper[4903]: I1126 22:52:29.028565 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:52:29 crc kubenswrapper[4903]: E1126 22:52:29.029229 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:52:36 crc kubenswrapper[4903]: I1126 22:52:36.065096 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-fxksz"]
Nov 26 22:52:36 crc kubenswrapper[4903]: I1126 22:52:36.084414 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-fxksz"]
Nov 26 22:52:38 crc kubenswrapper[4903]: I1126 22:52:38.048836 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0b2d5fd-9425-4082-b1cc-3ce796c82e0c" path="/var/lib/kubelet/pods/d0b2d5fd-9425-4082-b1cc-3ce796c82e0c/volumes"
Nov 26 22:52:44 crc kubenswrapper[4903]: I1126 22:52:44.030750 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:52:44 crc kubenswrapper[4903]: E1126 22:52:44.031917 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:52:55 crc kubenswrapper[4903]: I1126 22:52:55.029412 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:52:55 crc kubenswrapper[4903]: E1126 22:52:55.030679 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.030030 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:53:09 crc kubenswrapper[4903]: E1126 22:53:09.031168 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.049895 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-xw96b"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.064228 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-3d57-account-create-update-kbz2q"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.076117 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-6b7jl"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.087218 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-a856-account-create-update-fr9ds"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.099053 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-78l4c"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.114670 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-xw96b"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.126648 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-6b7jl"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.138482 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-78l4c"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.150500 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-3d57-account-create-update-kbz2q"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.168133 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-a856-account-create-update-fr9ds"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.184117 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f7c7-account-create-update-s7xfc"]
Nov 26 22:53:09 crc kubenswrapper[4903]: I1126 22:53:09.200589 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f7c7-account-create-update-s7xfc"]
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.070116 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="723772d5-ee6f-493e-94f1-7b9804ec1957" path="/var/lib/kubelet/pods/723772d5-ee6f-493e-94f1-7b9804ec1957/volumes"
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.072321 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7301bb2b-a968-426c-8c7b-147e84af9d2e" path="/var/lib/kubelet/pods/7301bb2b-a968-426c-8c7b-147e84af9d2e/volumes"
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.073171 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88faddae-4ab0-4bb0-886c-ce747933a8d2" path="/var/lib/kubelet/pods/88faddae-4ab0-4bb0-886c-ce747933a8d2/volumes"
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.073971 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9861636-fbd1-48eb-b179-efaed34ef23a" path="/var/lib/kubelet/pods/a9861636-fbd1-48eb-b179-efaed34ef23a/volumes"
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.076334 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acc450ee-60c4-4203-9e53-bfc0d0996227" path="/var/lib/kubelet/pods/acc450ee-60c4-4203-9e53-bfc0d0996227/volumes"
Nov 26 22:53:10 crc kubenswrapper[4903]: I1126 22:53:10.077042 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b845761e-5639-47f8-b33e-982f99d9e575" path="/var/lib/kubelet/pods/b845761e-5639-47f8-b33e-982f99d9e575/volumes"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.019931 4903 scope.go:117] "RemoveContainer" containerID="c5590704da57a0e07ad6a74affef23064afbed74c71edf63ef9bf49c5c1e04e2"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.073010 4903 scope.go:117] "RemoveContainer" containerID="d8426fe1ef5abc5d02e44965f680d7bcd077e502b7c2edcc8271d9904bbdbf66"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.150609 4903 scope.go:117] "RemoveContainer" containerID="bdc114f58a702c71b3544ee1958a7f89c3282e660dae2d4d6c780d8d221c7910"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.197551 4903 scope.go:117] "RemoveContainer" containerID="41ec46f787f4db088b4d9fd45388d9477a16bc1a1ca740c83d6913c3baae4559"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.242632 4903 scope.go:117] "RemoveContainer" containerID="3e487066b0cc9784f92e20198698aa818e63a1baf402525ef7d07dd11df1512c"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.298388 4903 scope.go:117] "RemoveContainer" containerID="a708073e0f2b077031c503a6ed2c86aecd807b154f4c65af17e815adf1740eab"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.347138 4903 scope.go:117] "RemoveContainer" containerID="b77c66d26b5e5affdaef4e9bc1f1f0fe69ceb3ffbf82926fbafec37d80355562"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.378854 4903 scope.go:117] "RemoveContainer" containerID="1b3cecdf8ffa6caf25cc440b94e68b03a729d9f7aa461cfe5da517150763d437"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.409642 4903 scope.go:117] "RemoveContainer" containerID="bd02a539b33242ed344060fd4ba88d9478bfc258dd2d63e898c32f6127340156"
Nov 26 22:53:20 crc kubenswrapper[4903]: I1126 22:53:20.440019 4903 scope.go:117] "RemoveContainer" containerID="7ec60eda21ae74821dcc7d16f84f8b93378b7402ff66bf70b333fe31f0adc7de"
Nov 26 22:53:23 crc kubenswrapper[4903]: I1126 22:53:23.030032 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:53:23 crc kubenswrapper[4903]: E1126 22:53:23.033161 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:53:36 crc kubenswrapper[4903]: I1126 22:53:36.329305 4903 generic.go:334] "Generic (PLEG): container finished" podID="8115e93a-72c0-4022-a687-6b58fb3c45ab" containerID="dd51a2f4fc1fa500d989a74f006cbbdd920db91fa2c85e141baa7bd03c5666e1" exitCode=0
Nov 26 22:53:36 crc kubenswrapper[4903]: I1126 22:53:36.329638 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c" event={"ID":"8115e93a-72c0-4022-a687-6b58fb3c45ab","Type":"ContainerDied","Data":"dd51a2f4fc1fa500d989a74f006cbbdd920db91fa2c85e141baa7bd03c5666e1"}
Nov 26 22:53:37 crc kubenswrapper[4903]: I1126 22:53:37.029351 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:53:37 crc kubenswrapper[4903]: E1126 22:53:37.029940 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:53:37 crc kubenswrapper[4903]: I1126 22:53:37.954961 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.051409 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-sfl5w"]
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.062381 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-sfl5w"]
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.069296 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory\") pod \"8115e93a-72c0-4022-a687-6b58fb3c45ab\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") "
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.069448 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7hkp\" (UniqueName: \"kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp\") pod \"8115e93a-72c0-4022-a687-6b58fb3c45ab\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") "
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.069622 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key\") pod \"8115e93a-72c0-4022-a687-6b58fb3c45ab\" (UID: \"8115e93a-72c0-4022-a687-6b58fb3c45ab\") "
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.078034 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp" (OuterVolumeSpecName: "kube-api-access-v7hkp") pod "8115e93a-72c0-4022-a687-6b58fb3c45ab" (UID: "8115e93a-72c0-4022-a687-6b58fb3c45ab"). InnerVolumeSpecName "kube-api-access-v7hkp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.106284 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8115e93a-72c0-4022-a687-6b58fb3c45ab" (UID: "8115e93a-72c0-4022-a687-6b58fb3c45ab"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.123916 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory" (OuterVolumeSpecName: "inventory") pod "8115e93a-72c0-4022-a687-6b58fb3c45ab" (UID: "8115e93a-72c0-4022-a687-6b58fb3c45ab"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.172858 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7hkp\" (UniqueName: \"kubernetes.io/projected/8115e93a-72c0-4022-a687-6b58fb3c45ab-kube-api-access-v7hkp\") on node \"crc\" DevicePath \"\""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.172900 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.172910 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8115e93a-72c0-4022-a687-6b58fb3c45ab-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.363365 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c" event={"ID":"8115e93a-72c0-4022-a687-6b58fb3c45ab","Type":"ContainerDied","Data":"88de2153e9d4f7aeda030fafe0e8d85028774f185ddf56fb42582d22fec80d66"}
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.363423 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88de2153e9d4f7aeda030fafe0e8d85028774f185ddf56fb42582d22fec80d66"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.363499 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-k665c"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.474763 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"]
Nov 26 22:53:38 crc kubenswrapper[4903]: E1126 22:53:38.475292 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8115e93a-72c0-4022-a687-6b58fb3c45ab" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.475311 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8115e93a-72c0-4022-a687-6b58fb3c45ab" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.475609 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8115e93a-72c0-4022-a687-6b58fb3c45ab" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.476464 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.479265 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.479297 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.479493 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.479906 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.492493 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"]
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.581991 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvxck\" (UniqueName: \"kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.582069 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.582585 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.684321 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvxck\" (UniqueName: \"kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.684430 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.684556 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.690395 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.697268 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.709474 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvxck\" (UniqueName: \"kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-65pnw\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:38 crc kubenswrapper[4903]: I1126 22:53:38.808431 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"
Nov 26 22:53:39 crc kubenswrapper[4903]: I1126 22:53:39.425996 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw"]
Nov 26 22:53:39 crc kubenswrapper[4903]: W1126 22:53:39.438186 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59398ac1_b8ae_47b3_b00e_f9f245b4eb27.slice/crio-2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277 WatchSource:0}: Error finding container 2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277: Status 404 returned error can't find the container with id 2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277
Nov 26 22:53:39 crc kubenswrapper[4903]: I1126 22:53:39.442909 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 22:53:40 crc kubenswrapper[4903]: I1126 22:53:40.364793 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6573f0cf-2bad-494e-8288-1c68c4326edb" path="/var/lib/kubelet/pods/6573f0cf-2bad-494e-8288-1c68c4326edb/volumes"
Nov 26 22:53:40 crc kubenswrapper[4903]: I1126 22:53:40.397104 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" event={"ID":"59398ac1-b8ae-47b3-b00e-f9f245b4eb27","Type":"ContainerStarted","Data":"2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277"}
Nov 26 22:53:41 crc kubenswrapper[4903]: I1126 22:53:41.415911 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" event={"ID":"59398ac1-b8ae-47b3-b00e-f9f245b4eb27","Type":"ContainerStarted","Data":"4780ccbe9225ac0a36c932e0673fb303a9831ad6e73b23b34c8419384e0cc1a3"}
Nov 26 22:53:41 crc kubenswrapper[4903]: I1126 22:53:41.436570 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" podStartSLOduration=2.46281403 podStartE2EDuration="3.436544172s" podCreationTimestamp="2025-11-26 22:53:38 +0000 UTC" firstStartedPulling="2025-11-26 22:53:39.442521082 +0000 UTC m=+1948.132756002" lastFinishedPulling="2025-11-26 22:53:40.416251224 +0000 UTC m=+1949.106486144" observedRunningTime="2025-11-26 22:53:41.431024175 +0000 UTC m=+1950.121259105" watchObservedRunningTime="2025-11-26 22:53:41.436544172 +0000 UTC m=+1950.126779112"
Nov 26 22:53:51 crc kubenswrapper[4903]: I1126 22:53:51.029279 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:53:51 crc kubenswrapper[4903]: E1126 22:53:51.030482 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:53:56 crc kubenswrapper[4903]: I1126 22:53:56.066559 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-55ll5"]
Nov 26 22:53:56 crc kubenswrapper[4903]: I1126 22:53:56.085913 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-180d-account-create-update-8bmhg"]
Nov 26 22:53:56 crc kubenswrapper[4903]: I1126 22:53:56.097975 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-55ll5"]
Nov 26 22:53:56 crc kubenswrapper[4903]: I1126 22:53:56.107468 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-180d-account-create-update-8bmhg"]
Nov 26 22:53:58 crc kubenswrapper[4903]: I1126 22:53:58.048006 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="444870af-f53d-4457-9650-f4de59dc6c14" path="/var/lib/kubelet/pods/444870af-f53d-4457-9650-f4de59dc6c14/volumes"
Nov 26 22:53:58 crc kubenswrapper[4903]: I1126 22:53:58.049916 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccb9a538-950d-4c50-9ee3-380703481e5e" path="/var/lib/kubelet/pods/ccb9a538-950d-4c50-9ee3-380703481e5e/volumes"
Nov 26 22:54:02 crc kubenswrapper[4903]: I1126 22:54:02.059857 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8tkk"]
Nov 26 22:54:02 crc kubenswrapper[4903]: I1126 22:54:02.064916 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-v8tkk"]
Nov 26 22:54:03 crc kubenswrapper[4903]: I1126 22:54:03.068784 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zs7fj"]
Nov 26 22:54:03 crc kubenswrapper[4903]: I1126 22:54:03.093974 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zs7fj"]
Nov 26 22:54:04 crc kubenswrapper[4903]: I1126 22:54:04.045769 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2355bf0c-e104-4dcd-888a-e164fd5d89be" path="/var/lib/kubelet/pods/2355bf0c-e104-4dcd-888a-e164fd5d89be/volumes"
Nov 26 22:54:04 crc kubenswrapper[4903]: I1126 22:54:04.047382 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be8f91a6-a9bc-4273-ad51-664e6e64ebb0" path="/var/lib/kubelet/pods/be8f91a6-a9bc-4273-ad51-664e6e64ebb0/volumes"
Nov 26 22:54:05 crc kubenswrapper[4903]: I1126 22:54:05.029119 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:54:05 crc kubenswrapper[4903]: E1126 22:54:05.029873 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:54:18 crc kubenswrapper[4903]: I1126 22:54:18.029268 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:54:18 crc kubenswrapper[4903]: E1126 22:54:18.030089 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 22:54:20 crc kubenswrapper[4903]: I1126 22:54:20.755235 4903 scope.go:117] "RemoveContainer" containerID="33e9d9715f9763d48f40aa337a05a613716c9593eadba58be245347bddd936bb"
Nov 26 22:54:20 crc kubenswrapper[4903]: I1126 22:54:20.803878 4903 scope.go:117] "RemoveContainer" containerID="277db87d3f7eed539d07fcdf860ff73245de3492f776f6db8aab98b2df719493"
Nov 26 22:54:20 crc kubenswrapper[4903]: I1126 22:54:20.899611 4903 scope.go:117] "RemoveContainer" containerID="d9dcae88ab8041f2d589cbaa62e0b1a19bebe572a35d5907e3539cb2858a1808"
Nov 26 22:54:20 crc kubenswrapper[4903]: I1126 22:54:20.951097 4903 scope.go:117] "RemoveContainer" containerID="cada2af09253f080bd319ef88c08e5a8d6beb4b9ba4dce77eb48dd3d8b95c18e"
Nov 26 22:54:21 crc kubenswrapper[4903]: I1126 22:54:21.039975 4903 scope.go:117] "RemoveContainer" containerID="69697950149a64697ed4335dcd25e520b542879081887e309883001df4f79219"
Nov 26 22:54:33 crc kubenswrapper[4903]: I1126 22:54:33.029043 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9"
Nov 26 22:54:34 crc kubenswrapper[4903]: I1126 22:54:34.252832 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc"}
Nov 26 22:54:47 crc kubenswrapper[4903]: I1126 22:54:47.058907 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-jclt8"]
Nov 26 22:54:47 crc kubenswrapper[4903]: I1126 22:54:47.078802 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-jclt8"]
Nov 26 22:54:48 crc kubenswrapper[4903]: I1126 22:54:48.040887 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir"
podUID="fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6" path="/var/lib/kubelet/pods/fdfbae1a-cfe8-4496-8e4d-2b4255dd34b6/volumes" Nov 26 22:55:01 crc kubenswrapper[4903]: I1126 22:55:01.612870 4903 generic.go:334] "Generic (PLEG): container finished" podID="59398ac1-b8ae-47b3-b00e-f9f245b4eb27" containerID="4780ccbe9225ac0a36c932e0673fb303a9831ad6e73b23b34c8419384e0cc1a3" exitCode=0 Nov 26 22:55:01 crc kubenswrapper[4903]: I1126 22:55:01.613071 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" event={"ID":"59398ac1-b8ae-47b3-b00e-f9f245b4eb27","Type":"ContainerDied","Data":"4780ccbe9225ac0a36c932e0673fb303a9831ad6e73b23b34c8419384e0cc1a3"} Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.128018 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.172819 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key\") pod \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.173204 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory\") pod \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.173230 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvxck\" (UniqueName: \"kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck\") pod \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\" (UID: \"59398ac1-b8ae-47b3-b00e-f9f245b4eb27\") " Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.179443 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck" (OuterVolumeSpecName: "kube-api-access-rvxck") pod "59398ac1-b8ae-47b3-b00e-f9f245b4eb27" (UID: "59398ac1-b8ae-47b3-b00e-f9f245b4eb27"). InnerVolumeSpecName "kube-api-access-rvxck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.206992 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "59398ac1-b8ae-47b3-b00e-f9f245b4eb27" (UID: "59398ac1-b8ae-47b3-b00e-f9f245b4eb27"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.219792 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory" (OuterVolumeSpecName: "inventory") pod "59398ac1-b8ae-47b3-b00e-f9f245b4eb27" (UID: "59398ac1-b8ae-47b3-b00e-f9f245b4eb27"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.279255 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.279285 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvxck\" (UniqueName: \"kubernetes.io/projected/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-kube-api-access-rvxck\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.279296 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/59398ac1-b8ae-47b3-b00e-f9f245b4eb27-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.645185 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" event={"ID":"59398ac1-b8ae-47b3-b00e-f9f245b4eb27","Type":"ContainerDied","Data":"2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277"} Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.645453 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e71b424153078ccd3d955b78c7ca0a43b707d024a397d53ea28a2b22278f277" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.645254 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-65pnw" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.760499 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2"] Nov 26 22:55:03 crc kubenswrapper[4903]: E1126 22:55:03.761026 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59398ac1-b8ae-47b3-b00e-f9f245b4eb27" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.761039 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="59398ac1-b8ae-47b3-b00e-f9f245b4eb27" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.761295 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="59398ac1-b8ae-47b3-b00e-f9f245b4eb27" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.762274 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.765166 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.765209 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.765399 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.765442 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.779019 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2"] Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.789040 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfphv\" (UniqueName: \"kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.789092 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.789165 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.891467 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfphv\" (UniqueName: \"kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.891515 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.891563 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.899369 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.904580 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:03 crc kubenswrapper[4903]: I1126 22:55:03.910180 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfphv\" (UniqueName: \"kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:04 crc kubenswrapper[4903]: I1126 22:55:04.086159 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:04 crc kubenswrapper[4903]: I1126 22:55:04.674369 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2"] Nov 26 22:55:05 crc kubenswrapper[4903]: I1126 22:55:05.691319 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" event={"ID":"5b5ab3d3-0223-4b0f-ab25-785af487d360","Type":"ContainerStarted","Data":"c491d20d4532ff329638a1dd1f03c0c4c036c6c921a558fba3b94c6a5f45495a"} Nov 26 22:55:05 crc kubenswrapper[4903]: I1126 22:55:05.691805 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" event={"ID":"5b5ab3d3-0223-4b0f-ab25-785af487d360","Type":"ContainerStarted","Data":"d0cd767f377bb4360bde961a37e094af70cafac6f1409ddfb855097a10bb6d0d"} Nov 26 22:55:10 crc kubenswrapper[4903]: I1126 22:55:10.756083 4903 generic.go:334] "Generic (PLEG): container finished" podID="5b5ab3d3-0223-4b0f-ab25-785af487d360" containerID="c491d20d4532ff329638a1dd1f03c0c4c036c6c921a558fba3b94c6a5f45495a" exitCode=0 Nov 26 22:55:10 crc kubenswrapper[4903]: I1126 22:55:10.756232 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" event={"ID":"5b5ab3d3-0223-4b0f-ab25-785af487d360","Type":"ContainerDied","Data":"c491d20d4532ff329638a1dd1f03c0c4c036c6c921a558fba3b94c6a5f45495a"} Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.233240 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.312072 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory\") pod \"5b5ab3d3-0223-4b0f-ab25-785af487d360\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.312265 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key\") pod \"5b5ab3d3-0223-4b0f-ab25-785af487d360\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.312295 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfphv\" (UniqueName: \"kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv\") pod \"5b5ab3d3-0223-4b0f-ab25-785af487d360\" (UID: \"5b5ab3d3-0223-4b0f-ab25-785af487d360\") " Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.318491 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv" (OuterVolumeSpecName: "kube-api-access-tfphv") pod "5b5ab3d3-0223-4b0f-ab25-785af487d360" (UID: "5b5ab3d3-0223-4b0f-ab25-785af487d360"). InnerVolumeSpecName "kube-api-access-tfphv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.360534 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory" (OuterVolumeSpecName: "inventory") pod "5b5ab3d3-0223-4b0f-ab25-785af487d360" (UID: "5b5ab3d3-0223-4b0f-ab25-785af487d360"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.364360 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5b5ab3d3-0223-4b0f-ab25-785af487d360" (UID: "5b5ab3d3-0223-4b0f-ab25-785af487d360"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.415534 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.415582 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5b5ab3d3-0223-4b0f-ab25-785af487d360-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.415599 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfphv\" (UniqueName: \"kubernetes.io/projected/5b5ab3d3-0223-4b0f-ab25-785af487d360-kube-api-access-tfphv\") on node \"crc\" DevicePath \"\"" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.787097 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" event={"ID":"5b5ab3d3-0223-4b0f-ab25-785af487d360","Type":"ContainerDied","Data":"d0cd767f377bb4360bde961a37e094af70cafac6f1409ddfb855097a10bb6d0d"} Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.787460 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0cd767f377bb4360bde961a37e094af70cafac6f1409ddfb855097a10bb6d0d" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.787521 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.872365 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt"] Nov 26 22:55:12 crc kubenswrapper[4903]: E1126 22:55:12.873186 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b5ab3d3-0223-4b0f-ab25-785af487d360" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.873223 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b5ab3d3-0223-4b0f-ab25-785af487d360" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.873730 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b5ab3d3-0223-4b0f-ab25-785af487d360" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.875192 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.877495 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.877878 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.878504 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.878746 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.912879 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt"] Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.932937 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw9sw\" (UniqueName: \"kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.933219 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:12 crc kubenswrapper[4903]: I1126 22:55:12.934218 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.037588 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw9sw\" (UniqueName: \"kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.037732 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.037779 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: 
\"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.042935 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.043279 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.054079 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw9sw\" (UniqueName: \"kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-wh5kt\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.205522 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:55:13 crc kubenswrapper[4903]: I1126 22:55:13.951008 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt"] Nov 26 22:55:14 crc kubenswrapper[4903]: I1126 22:55:14.822109 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" event={"ID":"31e264f2-c649-43e0-af90-ca65e2cb84da","Type":"ContainerStarted","Data":"593704ad2ee166eaccd4c0c533601054454392a1228308454eaaddb996e51e05"} Nov 26 22:55:14 crc kubenswrapper[4903]: I1126 22:55:14.822761 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" event={"ID":"31e264f2-c649-43e0-af90-ca65e2cb84da","Type":"ContainerStarted","Data":"b09cb99ff3d65c436a99747f2caf5316ca74c4b93e81509e9eabdf40f585c75c"} Nov 26 22:55:14 crc kubenswrapper[4903]: I1126 22:55:14.849774 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" podStartSLOduration=2.3676868620000002 podStartE2EDuration="2.849690475s" podCreationTimestamp="2025-11-26 22:55:12 +0000 UTC" firstStartedPulling="2025-11-26 22:55:13.953289743 +0000 UTC m=+2042.643524663" lastFinishedPulling="2025-11-26 22:55:14.435293366 +0000 UTC m=+2043.125528276" observedRunningTime="2025-11-26 22:55:14.838411315 +0000 UTC m=+2043.528646225" watchObservedRunningTime="2025-11-26 22:55:14.849690475 +0000 UTC m=+2043.539925375" Nov 26 22:55:21 crc kubenswrapper[4903]: I1126 22:55:21.236404 4903 scope.go:117] "RemoveContainer" containerID="b4acb01676addc4a31a9a6154c8702b0e59ee8687425da49a5197dca79d739e3" Nov 26 22:56:02 crc kubenswrapper[4903]: I1126 22:56:02.508093 4903 generic.go:334] "Generic (PLEG): container finished" podID="31e264f2-c649-43e0-af90-ca65e2cb84da" containerID="593704ad2ee166eaccd4c0c533601054454392a1228308454eaaddb996e51e05" exitCode=0 Nov 
26 22:56:02 crc kubenswrapper[4903]: I1126 22:56:02.508145 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" event={"ID":"31e264f2-c649-43e0-af90-ca65e2cb84da","Type":"ContainerDied","Data":"593704ad2ee166eaccd4c0c533601054454392a1228308454eaaddb996e51e05"} Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.035927 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.105269 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory\") pod \"31e264f2-c649-43e0-af90-ca65e2cb84da\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.105551 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key\") pod \"31e264f2-c649-43e0-af90-ca65e2cb84da\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.105634 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw9sw\" (UniqueName: \"kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw\") pod \"31e264f2-c649-43e0-af90-ca65e2cb84da\" (UID: \"31e264f2-c649-43e0-af90-ca65e2cb84da\") " Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.128175 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw" (OuterVolumeSpecName: "kube-api-access-lw9sw") pod "31e264f2-c649-43e0-af90-ca65e2cb84da" (UID: "31e264f2-c649-43e0-af90-ca65e2cb84da"). InnerVolumeSpecName "kube-api-access-lw9sw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.151278 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory" (OuterVolumeSpecName: "inventory") pod "31e264f2-c649-43e0-af90-ca65e2cb84da" (UID: "31e264f2-c649-43e0-af90-ca65e2cb84da"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.160159 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "31e264f2-c649-43e0-af90-ca65e2cb84da" (UID: "31e264f2-c649-43e0-af90-ca65e2cb84da"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.207828 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.207859 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw9sw\" (UniqueName: \"kubernetes.io/projected/31e264f2-c649-43e0-af90-ca65e2cb84da-kube-api-access-lw9sw\") on node \"crc\" DevicePath \"\"" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.207869 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/31e264f2-c649-43e0-af90-ca65e2cb84da-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.531216 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" event={"ID":"31e264f2-c649-43e0-af90-ca65e2cb84da","Type":"ContainerDied","Data":"b09cb99ff3d65c436a99747f2caf5316ca74c4b93e81509e9eabdf40f585c75c"} Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.531458 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b09cb99ff3d65c436a99747f2caf5316ca74c4b93e81509e9eabdf40f585c75c" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.531352 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-wh5kt" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.658932 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882"] Nov 26 22:56:04 crc kubenswrapper[4903]: E1126 22:56:04.659386 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e264f2-c649-43e0-af90-ca65e2cb84da" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.659403 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e264f2-c649-43e0-af90-ca65e2cb84da" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.659633 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e264f2-c649-43e0-af90-ca65e2cb84da" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.660462 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.662557 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.663149 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.663250 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.663365 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.687437 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882"] Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.724877 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vwfc\" (UniqueName: \"kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.725004 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.725075 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.827308 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.827396 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.827589 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vwfc\" (UniqueName: \"kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" 
(UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.833816 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.838334 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.849542 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vwfc\" (UniqueName: \"kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-dg882\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:04 crc kubenswrapper[4903]: I1126 22:56:04.989212 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" Nov 26 22:56:05 crc kubenswrapper[4903]: I1126 22:56:05.596462 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882"] Nov 26 22:56:06 crc kubenswrapper[4903]: I1126 22:56:06.565302 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" event={"ID":"e186f675-8a6e-4e8d-8531-247e10617355","Type":"ContainerStarted","Data":"0fce61bb6e27b15703239fdc6a941fc9e45e66966c57add6a9572c53214a11c6"} Nov 26 22:56:06 crc kubenswrapper[4903]: I1126 22:56:06.565765 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" event={"ID":"e186f675-8a6e-4e8d-8531-247e10617355","Type":"ContainerStarted","Data":"f5e666018f34d0f176bb12129434ffe7a7cf3bff9cc2abf02188b59ff0c78ba4"} Nov 26 22:56:06 crc kubenswrapper[4903]: I1126 22:56:06.582682 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" podStartSLOduration=2.059331194 podStartE2EDuration="2.582666713s" podCreationTimestamp="2025-11-26 22:56:04 +0000 UTC" firstStartedPulling="2025-11-26 22:56:05.592200964 +0000 UTC m=+2094.282435924" lastFinishedPulling="2025-11-26 22:56:06.115536523 +0000 UTC m=+2094.805771443" observedRunningTime="2025-11-26 22:56:06.578516592 +0000 UTC m=+2095.268751502" watchObservedRunningTime="2025-11-26 22:56:06.582666713 +0000 UTC m=+2095.272901623" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.382413 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"] Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.386277 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.411391 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"] Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.477939 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.478044 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.478150 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9d4nt\" (UniqueName: \"kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.581077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.581179 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.581202 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9d4nt\" (UniqueName: \"kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.581665 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.581953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.604544 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9d4nt\" (UniqueName: \"kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt\") pod \"redhat-operators-9d82l\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") " pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:24 crc kubenswrapper[4903]: I1126 22:56:24.718192 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 22:56:25 crc kubenswrapper[4903]: I1126 22:56:25.342452 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"] Nov 26 22:56:25 crc kubenswrapper[4903]: I1126 22:56:25.885555 4903 generic.go:334] "Generic (PLEG): container finished" podID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerID="8c2d06b8bfe27ea8d8d55079bf62e02c90a8af9798879bc3e64671b6a8c3fd95" exitCode=0 Nov 26 22:56:25 crc kubenswrapper[4903]: I1126 22:56:25.885702 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerDied","Data":"8c2d06b8bfe27ea8d8d55079bf62e02c90a8af9798879bc3e64671b6a8c3fd95"} Nov 26 22:56:25 crc kubenswrapper[4903]: I1126 22:56:25.886043 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerStarted","Data":"3e11cf104aca8cc390dfc17ce50f09d5e74cb6c182f9c10563f1759ff2396dae"} Nov 26 22:56:28 crc kubenswrapper[4903]: I1126 22:56:28.926667 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerStarted","Data":"f0aa106efdbfc7e89dfcf1e25c6d415936337a1c621096450947673255b3fce7"} Nov 26 22:56:34 crc kubenswrapper[4903]: I1126 22:56:34.000053 4903 generic.go:334] "Generic (PLEG): container finished" podID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerID="f0aa106efdbfc7e89dfcf1e25c6d415936337a1c621096450947673255b3fce7" exitCode=0 Nov 26 22:56:34 crc kubenswrapper[4903]: I1126 22:56:34.000115 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerDied","Data":"f0aa106efdbfc7e89dfcf1e25c6d415936337a1c621096450947673255b3fce7"} Nov 26 22:56:37 crc kubenswrapper[4903]: I1126 22:56:37.050451 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerStarted","Data":"9c108f4594fd35cc1b173f645c56cd67787b8a606649084a24b8d49cc68c4869"} Nov 26 22:56:37 crc kubenswrapper[4903]: I1126 22:56:37.077510 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9d82l" podStartSLOduration=2.50857754 podStartE2EDuration="13.077488446s" podCreationTimestamp="2025-11-26 22:56:24 +0000 UTC" firstStartedPulling="2025-11-26 22:56:25.888506531 +0000 UTC m=+2114.578741431" lastFinishedPulling="2025-11-26 22:56:36.457417387 +0000 UTC m=+2125.147652337" observedRunningTime="2025-11-26 22:56:37.076197232 +0000 UTC m=+2125.766432162" watchObservedRunningTime="2025-11-26 22:56:37.077488446 +0000 UTC m=+2125.767723366" Nov 26 22:56:44 crc kubenswrapper[4903]: I1126 22:56:44.718992 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9d82l" Nov 26 
22:56:44 crc kubenswrapper[4903]: I1126 22:56:44.719763 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9d82l"
Nov 26 22:56:45 crc kubenswrapper[4903]: I1126 22:56:45.775836 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9d82l" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="registry-server" probeResult="failure" output=<
Nov 26 22:56:45 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 22:56:45 crc kubenswrapper[4903]: >
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.969186 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wr586"]
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.972474 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.993074 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-catalog-content\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.993134 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-utilities\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.993222 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t44w9\" (UniqueName: \"kubernetes.io/projected/b8dd7646-3929-498a-bfbd-40857a75e6fb-kube-api-access-t44w9\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:51 crc kubenswrapper[4903]: I1126 22:56:51.998593 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wr586"]
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.095560 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t44w9\" (UniqueName: \"kubernetes.io/projected/b8dd7646-3929-498a-bfbd-40857a75e6fb-kube-api-access-t44w9\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.095749 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-catalog-content\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.095795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-utilities\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.096348 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-utilities\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.096364 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8dd7646-3929-498a-bfbd-40857a75e6fb-catalog-content\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.118953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t44w9\" (UniqueName: \"kubernetes.io/projected/b8dd7646-3929-498a-bfbd-40857a75e6fb-kube-api-access-t44w9\") pod \"community-operators-wr586\" (UID: \"b8dd7646-3929-498a-bfbd-40857a75e6fb\") " pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.306563 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:56:52 crc kubenswrapper[4903]: W1126 22:56:52.847368 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8dd7646_3929_498a_bfbd_40857a75e6fb.slice/crio-480c5923f4509293f1019f7d75196dddf9c7644a1588f9cea78267d738370fb6 WatchSource:0}: Error finding container 480c5923f4509293f1019f7d75196dddf9c7644a1588f9cea78267d738370fb6: Status 404 returned error can't find the container with id 480c5923f4509293f1019f7d75196dddf9c7644a1588f9cea78267d738370fb6
Nov 26 22:56:52 crc kubenswrapper[4903]: I1126 22:56:52.855598 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wr586"]
Nov 26 22:56:53 crc kubenswrapper[4903]: I1126 22:56:53.621380 4903 generic.go:334] "Generic (PLEG): container finished" podID="b8dd7646-3929-498a-bfbd-40857a75e6fb" containerID="0db918e64f1f9e0307f74e0240dd110cab3561fd223c12054d9ac453ff8943be" exitCode=0
Nov 26 22:56:53 crc kubenswrapper[4903]: I1126 22:56:53.621666 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wr586" event={"ID":"b8dd7646-3929-498a-bfbd-40857a75e6fb","Type":"ContainerDied","Data":"0db918e64f1f9e0307f74e0240dd110cab3561fd223c12054d9ac453ff8943be"}
Nov 26 22:56:53 crc kubenswrapper[4903]: I1126 22:56:53.621723 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wr586" event={"ID":"b8dd7646-3929-498a-bfbd-40857a75e6fb","Type":"ContainerStarted","Data":"480c5923f4509293f1019f7d75196dddf9c7644a1588f9cea78267d738370fb6"}
Nov 26 22:56:54 crc kubenswrapper[4903]: I1126 22:56:54.821383 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9d82l"
Nov 26 22:56:54 crc kubenswrapper[4903]: I1126 22:56:54.899103 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9d82l"
Nov 26 22:56:56 crc kubenswrapper[4903]: I1126 22:56:56.343080 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"]
Nov 26 22:56:56 crc kubenswrapper[4903]: I1126 22:56:56.655845 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9d82l" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="registry-server" containerID="cri-o://9c108f4594fd35cc1b173f645c56cd67787b8a606649084a24b8d49cc68c4869" gracePeriod=2
Nov 26 22:56:57 crc kubenswrapper[4903]: I1126 22:56:57.684476 4903 generic.go:334] "Generic (PLEG): container finished" podID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerID="9c108f4594fd35cc1b173f645c56cd67787b8a606649084a24b8d49cc68c4869" exitCode=0
Nov 26 22:56:57 crc kubenswrapper[4903]: I1126 22:56:57.684572 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerDied","Data":"9c108f4594fd35cc1b173f645c56cd67787b8a606649084a24b8d49cc68c4869"}
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.519647 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9d82l"
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.692159 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities\") pod \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") "
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.693910 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities" (OuterVolumeSpecName: "utilities") pod "dc11ad69-27f7-4ae3-9c16-0a5825c90b91" (UID: "dc11ad69-27f7-4ae3-9c16-0a5825c90b91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.694028 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9d4nt\" (UniqueName: \"kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt\") pod \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") "
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.694082 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content\") pod \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\" (UID: \"dc11ad69-27f7-4ae3-9c16-0a5825c90b91\") "
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.695611 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.719655 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt" (OuterVolumeSpecName: "kube-api-access-9d4nt") pod "dc11ad69-27f7-4ae3-9c16-0a5825c90b91" (UID: "dc11ad69-27f7-4ae3-9c16-0a5825c90b91"). InnerVolumeSpecName "kube-api-access-9d4nt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.721686 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wr586" event={"ID":"b8dd7646-3929-498a-bfbd-40857a75e6fb","Type":"ContainerStarted","Data":"02c1053f8f9f3834aa275b21310e7883c870d694808bcef65713c89d8ca10067"}
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.725005 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9d82l" event={"ID":"dc11ad69-27f7-4ae3-9c16-0a5825c90b91","Type":"ContainerDied","Data":"3e11cf104aca8cc390dfc17ce50f09d5e74cb6c182f9c10563f1759ff2396dae"}
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.725063 4903 scope.go:117] "RemoveContainer" containerID="9c108f4594fd35cc1b173f645c56cd67787b8a606649084a24b8d49cc68c4869"
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.725102 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9d82l"
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.798099 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9d4nt\" (UniqueName: \"kubernetes.io/projected/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-kube-api-access-9d4nt\") on node \"crc\" DevicePath \"\""
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.803246 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc11ad69-27f7-4ae3-9c16-0a5825c90b91" (UID: "dc11ad69-27f7-4ae3-9c16-0a5825c90b91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.849841 4903 scope.go:117] "RemoveContainer" containerID="f0aa106efdbfc7e89dfcf1e25c6d415936337a1c621096450947673255b3fce7"
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.881079 4903 scope.go:117] "RemoveContainer" containerID="8c2d06b8bfe27ea8d8d55079bf62e02c90a8af9798879bc3e64671b6a8c3fd95"
Nov 26 22:56:59 crc kubenswrapper[4903]: I1126 22:56:59.901093 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc11ad69-27f7-4ae3-9c16-0a5825c90b91-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:00 crc kubenswrapper[4903]: I1126 22:57:00.145004 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"]
Nov 26 22:57:00 crc kubenswrapper[4903]: I1126 22:57:00.159842 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9d82l"]
Nov 26 22:57:00 crc kubenswrapper[4903]: I1126 22:57:00.739995 4903 generic.go:334] "Generic (PLEG): container finished" podID="b8dd7646-3929-498a-bfbd-40857a75e6fb" containerID="02c1053f8f9f3834aa275b21310e7883c870d694808bcef65713c89d8ca10067" exitCode=0
Nov 26 22:57:00 crc kubenswrapper[4903]: I1126 22:57:00.740078 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wr586" event={"ID":"b8dd7646-3929-498a-bfbd-40857a75e6fb","Type":"ContainerDied","Data":"02c1053f8f9f3834aa275b21310e7883c870d694808bcef65713c89d8ca10067"}
Nov 26 22:57:01 crc kubenswrapper[4903]: I1126 22:57:01.758331 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wr586" event={"ID":"b8dd7646-3929-498a-bfbd-40857a75e6fb","Type":"ContainerStarted","Data":"b1a39e2ab2032db1c83337edb75de1a4a9ff43a3d532af1a6512bbcfbaf50fc2"}
Nov 26 22:57:01 crc kubenswrapper[4903]: I1126 22:57:01.783764 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wr586" podStartSLOduration=3.16494745 podStartE2EDuration="10.783744378s" podCreationTimestamp="2025-11-26 22:56:51 +0000 UTC" firstStartedPulling="2025-11-26 22:56:53.623722718 +0000 UTC m=+2142.313957638" lastFinishedPulling="2025-11-26 22:57:01.242519626 +0000 UTC m=+2149.932754566" observedRunningTime="2025-11-26 22:57:01.783473561 +0000 UTC m=+2150.473708481" watchObservedRunningTime="2025-11-26 22:57:01.783744378 +0000 UTC m=+2150.473979288"
Nov 26 22:57:01 crc kubenswrapper[4903]: I1126 22:57:01.981144 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 22:57:01 crc kubenswrapper[4903]: I1126 22:57:01.981203 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 22:57:02 crc kubenswrapper[4903]: I1126 22:57:02.052538 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" path="/var/lib/kubelet/pods/dc11ad69-27f7-4ae3-9c16-0a5825c90b91/volumes"
Nov 26 22:57:02 crc kubenswrapper[4903]: I1126 22:57:02.306953 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:57:02 crc kubenswrapper[4903]: I1126 22:57:02.307921 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:57:03 crc kubenswrapper[4903]: I1126 22:57:03.370306 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wr586" podUID="b8dd7646-3929-498a-bfbd-40857a75e6fb" containerName="registry-server" probeResult="failure" output=<
Nov 26 22:57:03 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 22:57:03 crc kubenswrapper[4903]: >
Nov 26 22:57:06 crc kubenswrapper[4903]: I1126 22:57:06.826953 4903 generic.go:334] "Generic (PLEG): container finished" podID="e186f675-8a6e-4e8d-8531-247e10617355" containerID="0fce61bb6e27b15703239fdc6a941fc9e45e66966c57add6a9572c53214a11c6" exitCode=0
Nov 26 22:57:06 crc kubenswrapper[4903]: I1126 22:57:06.827092 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" event={"ID":"e186f675-8a6e-4e8d-8531-247e10617355","Type":"ContainerDied","Data":"0fce61bb6e27b15703239fdc6a941fc9e45e66966c57add6a9572c53214a11c6"}
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.369114 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.552992 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key\") pod \"e186f675-8a6e-4e8d-8531-247e10617355\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") "
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.553595 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vwfc\" (UniqueName: \"kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc\") pod \"e186f675-8a6e-4e8d-8531-247e10617355\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") "
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.553760 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory\") pod \"e186f675-8a6e-4e8d-8531-247e10617355\" (UID: \"e186f675-8a6e-4e8d-8531-247e10617355\") "
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.560164 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc" (OuterVolumeSpecName: "kube-api-access-4vwfc") pod "e186f675-8a6e-4e8d-8531-247e10617355" (UID: "e186f675-8a6e-4e8d-8531-247e10617355"). InnerVolumeSpecName "kube-api-access-4vwfc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.588848 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory" (OuterVolumeSpecName: "inventory") pod "e186f675-8a6e-4e8d-8531-247e10617355" (UID: "e186f675-8a6e-4e8d-8531-247e10617355"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.605404 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e186f675-8a6e-4e8d-8531-247e10617355" (UID: "e186f675-8a6e-4e8d-8531-247e10617355"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.657793 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.657871 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e186f675-8a6e-4e8d-8531-247e10617355-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.657892 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vwfc\" (UniqueName: \"kubernetes.io/projected/e186f675-8a6e-4e8d-8531-247e10617355-kube-api-access-4vwfc\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.855553 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.855541 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-dg882" event={"ID":"e186f675-8a6e-4e8d-8531-247e10617355","Type":"ContainerDied","Data":"f5e666018f34d0f176bb12129434ffe7a7cf3bff9cc2abf02188b59ff0c78ba4"}
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.855668 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5e666018f34d0f176bb12129434ffe7a7cf3bff9cc2abf02188b59ff0c78ba4"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.960201 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wrjtk"]
Nov 26 22:57:08 crc kubenswrapper[4903]: E1126 22:57:08.960787 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="registry-server"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.960807 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="registry-server"
Nov 26 22:57:08 crc kubenswrapper[4903]: E1126 22:57:08.960833 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="extract-content"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.960839 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="extract-content"
Nov 26 22:57:08 crc kubenswrapper[4903]: E1126 22:57:08.960859 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="extract-utilities"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.960866 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="extract-utilities"
Nov 26 22:57:08 crc kubenswrapper[4903]: E1126 22:57:08.960883 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e186f675-8a6e-4e8d-8531-247e10617355" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.960890 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e186f675-8a6e-4e8d-8531-247e10617355" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.961105 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e186f675-8a6e-4e8d-8531-247e10617355" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.961123 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc11ad69-27f7-4ae3-9c16-0a5825c90b91" containerName="registry-server"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.962005 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.965942 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.967315 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.967461 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.967620 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.975398 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6grpg\" (UniqueName: \"kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.981843 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.982117 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:08 crc kubenswrapper[4903]: I1126 22:57:08.989917 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wrjtk"]
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.085440 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.085751 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.086494 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6grpg\" (UniqueName: \"kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.091098 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.093431 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.106952 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6grpg\" (UniqueName: \"kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg\") pod \"ssh-known-hosts-edpm-deployment-wrjtk\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") " pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.292116 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:09 crc kubenswrapper[4903]: I1126 22:57:09.950176 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wrjtk"]
Nov 26 22:57:10 crc kubenswrapper[4903]: I1126 22:57:10.894567 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk" event={"ID":"078bfb36-1f57-4173-b01a-cc7a6e3862dc","Type":"ContainerStarted","Data":"1429469feef4cb2937570e5709458d3183c298a67a64c18986a1f1673bc824ab"}
Nov 26 22:57:10 crc kubenswrapper[4903]: I1126 22:57:10.895301 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk" event={"ID":"078bfb36-1f57-4173-b01a-cc7a6e3862dc","Type":"ContainerStarted","Data":"980b580886fc74b66c7d4f9a11626d30830eb2052f77a8b8224bd4a870e8ff6b"}
Nov 26 22:57:10 crc kubenswrapper[4903]: I1126 22:57:10.926320 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk" podStartSLOduration=2.41381478 podStartE2EDuration="2.926295636s" podCreationTimestamp="2025-11-26 22:57:08 +0000 UTC" firstStartedPulling="2025-11-26 22:57:09.962951238 +0000 UTC m=+2158.653186158" lastFinishedPulling="2025-11-26 22:57:10.475432064 +0000 UTC m=+2159.165667014" observedRunningTime="2025-11-26 22:57:10.919402092 +0000 UTC m=+2159.609637032" watchObservedRunningTime="2025-11-26 22:57:10.926295636 +0000 UTC m=+2159.616530576"
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.400563 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.490080 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wr586"
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.631071 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wr586"]
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.685364 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtxgl"]
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.685636 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rtxgl" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="registry-server" containerID="cri-o://bb293a9a89edfa818b7c4fcd5402e6e1f797f5bfe9cfc0d9ea5acbffbbdd91ad" gracePeriod=2
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.926449 4903 generic.go:334] "Generic (PLEG): container finished" podID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerID="bb293a9a89edfa818b7c4fcd5402e6e1f797f5bfe9cfc0d9ea5acbffbbdd91ad" exitCode=0
Nov 26 22:57:12 crc kubenswrapper[4903]: I1126 22:57:12.928249 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerDied","Data":"bb293a9a89edfa818b7c4fcd5402e6e1f797f5bfe9cfc0d9ea5acbffbbdd91ad"}
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.215875 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rtxgl"
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.415759 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities\") pod \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") "
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.415899 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w9kl\" (UniqueName: \"kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl\") pod \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") "
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.415992 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content\") pod \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\" (UID: \"63d08938-8b23-4efc-a3d6-ff8fccfb45e4\") "
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.416250 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities" (OuterVolumeSpecName: "utilities") pod "63d08938-8b23-4efc-a3d6-ff8fccfb45e4" (UID: "63d08938-8b23-4efc-a3d6-ff8fccfb45e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.416838 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.424820 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl" (OuterVolumeSpecName: "kube-api-access-8w9kl") pod "63d08938-8b23-4efc-a3d6-ff8fccfb45e4" (UID: "63d08938-8b23-4efc-a3d6-ff8fccfb45e4"). InnerVolumeSpecName "kube-api-access-8w9kl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.459595 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63d08938-8b23-4efc-a3d6-ff8fccfb45e4" (UID: "63d08938-8b23-4efc-a3d6-ff8fccfb45e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.518556 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w9kl\" (UniqueName: \"kubernetes.io/projected/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-kube-api-access-8w9kl\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.518599 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63d08938-8b23-4efc-a3d6-ff8fccfb45e4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.941836 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtxgl" event={"ID":"63d08938-8b23-4efc-a3d6-ff8fccfb45e4","Type":"ContainerDied","Data":"6b66eb36e331ccb3542168d00c757edbd10f2abe1098313bb2b034d50236f25f"}
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.942198 4903 scope.go:117] "RemoveContainer" containerID="bb293a9a89edfa818b7c4fcd5402e6e1f797f5bfe9cfc0d9ea5acbffbbdd91ad"
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.941889 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rtxgl"
Nov 26 22:57:13 crc kubenswrapper[4903]: I1126 22:57:13.981820 4903 scope.go:117] "RemoveContainer" containerID="a4d462645d2bf479274590d249aaca46da87b30d133681033db52dd3371dcf00"
Nov 26 22:57:14 crc kubenswrapper[4903]: I1126 22:57:14.008247 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtxgl"]
Nov 26 22:57:14 crc kubenswrapper[4903]: I1126 22:57:14.021825 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rtxgl"]
Nov 26 22:57:14 crc kubenswrapper[4903]: I1126 22:57:14.030440 4903 scope.go:117] "RemoveContainer" containerID="31c91ceaf7db193abff6bcd618c2f2219be6524ef3f5d7961f79df02de5ee435"
Nov 26 22:57:14 crc kubenswrapper[4903]: I1126 22:57:14.044384 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" path="/var/lib/kubelet/pods/63d08938-8b23-4efc-a3d6-ff8fccfb45e4/volumes"
Nov 26 22:57:19 crc kubenswrapper[4903]: I1126 22:57:19.039986 4903 generic.go:334] "Generic (PLEG): container finished" podID="078bfb36-1f57-4173-b01a-cc7a6e3862dc" containerID="1429469feef4cb2937570e5709458d3183c298a67a64c18986a1f1673bc824ab" exitCode=0
Nov 26 22:57:19 crc kubenswrapper[4903]: I1126 22:57:19.040059 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk" event={"ID":"078bfb36-1f57-4173-b01a-cc7a6e3862dc","Type":"ContainerDied","Data":"1429469feef4cb2937570e5709458d3183c298a67a64c18986a1f1673bc824ab"}
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.600621 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.626433 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0\") pod \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") "
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.626594 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam\") pod \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") "
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.626915 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6grpg\" (UniqueName: \"kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg\") pod \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\" (UID: \"078bfb36-1f57-4173-b01a-cc7a6e3862dc\") "
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.639066 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg" (OuterVolumeSpecName: "kube-api-access-6grpg") pod "078bfb36-1f57-4173-b01a-cc7a6e3862dc" (UID: "078bfb36-1f57-4173-b01a-cc7a6e3862dc"). InnerVolumeSpecName "kube-api-access-6grpg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.686802 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "078bfb36-1f57-4173-b01a-cc7a6e3862dc" (UID: "078bfb36-1f57-4173-b01a-cc7a6e3862dc"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.701249 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "078bfb36-1f57-4173-b01a-cc7a6e3862dc" (UID: "078bfb36-1f57-4173-b01a-cc7a6e3862dc"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.731832 4903 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.731895 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/078bfb36-1f57-4173-b01a-cc7a6e3862dc-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:20 crc kubenswrapper[4903]: I1126 22:57:20.731919 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6grpg\" (UniqueName: \"kubernetes.io/projected/078bfb36-1f57-4173-b01a-cc7a6e3862dc-kube-api-access-6grpg\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.075161 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk" event={"ID":"078bfb36-1f57-4173-b01a-cc7a6e3862dc","Type":"ContainerDied","Data":"980b580886fc74b66c7d4f9a11626d30830eb2052f77a8b8224bd4a870e8ff6b"}
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.075222 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="980b580886fc74b66c7d4f9a11626d30830eb2052f77a8b8224bd4a870e8ff6b"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.075232 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wrjtk"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.216405 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"]
Nov 26 22:57:21 crc kubenswrapper[4903]: E1126 22:57:21.217897 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="extract-utilities"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.217931 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="extract-utilities"
Nov 26 22:57:21 crc kubenswrapper[4903]: E1126 22:57:21.218012 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="078bfb36-1f57-4173-b01a-cc7a6e3862dc" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.218024 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="078bfb36-1f57-4173-b01a-cc7a6e3862dc" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 22:57:21 crc kubenswrapper[4903]: E1126 22:57:21.218094 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="registry-server"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.218109 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="registry-server"
Nov 26 22:57:21 crc kubenswrapper[4903]: E1126 22:57:21.218204 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="extract-content"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.218215 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="extract-content"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.219128 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="078bfb36-1f57-4173-b01a-cc7a6e3862dc" containerName="ssh-known-hosts-edpm-deployment"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.219212 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="63d08938-8b23-4efc-a3d6-ff8fccfb45e4" containerName="registry-server"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.221254 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.224445 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.224760 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.225537 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.227574 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.251528 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.251929 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6lb2\" (UniqueName: \"kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.252012 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.254023 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"]
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.354632 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6lb2\" (UniqueName: \"kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.354687 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.354789 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.359137 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.359407 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.383322 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6lb2\" (UniqueName: \"kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-sgr64\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:21 crc kubenswrapper[4903]: I1126 22:57:21.563967 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:22 crc kubenswrapper[4903]: I1126 22:57:22.168479 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"]
Nov 26 22:57:23 crc kubenswrapper[4903]: I1126 22:57:23.107311 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64" event={"ID":"f8164f4b-1f48-4f38-810b-3a3b636c48ed","Type":"ContainerStarted","Data":"29133447e654daa48baf027bc729306f6d98a8f5c9a978e5ca0588fa2d63ac29"}
Nov 26 22:57:23 crc kubenswrapper[4903]: I1126 22:57:23.108276 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64" event={"ID":"f8164f4b-1f48-4f38-810b-3a3b636c48ed","Type":"ContainerStarted","Data":"501c00eaeb75554399fd7aaff724f193c20ba598642cb743f1893e5278758661"}
Nov 26 22:57:23 crc kubenswrapper[4903]: I1126 22:57:23.135340 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64" podStartSLOduration=1.692289723 podStartE2EDuration="2.135314327s" podCreationTimestamp="2025-11-26 22:57:21 +0000 UTC" firstStartedPulling="2025-11-26 22:57:22.176326406 +0000 UTC m=+2170.866561316" lastFinishedPulling="2025-11-26 22:57:22.61935101 +0000 UTC m=+2171.309585920" observedRunningTime="2025-11-26 22:57:23.131878246 +0000 UTC m=+2171.822113196" watchObservedRunningTime="2025-11-26 22:57:23.135314327 +0000 UTC m=+2171.825549277"
Nov 26 22:57:31 crc kubenswrapper[4903]: I1126 22:57:31.981503 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 22:57:31 crc kubenswrapper[4903]: I1126 22:57:31.982318 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 22:57:32 crc kubenswrapper[4903]: I1126 22:57:32.317313 4903 generic.go:334] "Generic (PLEG): container finished" podID="f8164f4b-1f48-4f38-810b-3a3b636c48ed" containerID="29133447e654daa48baf027bc729306f6d98a8f5c9a978e5ca0588fa2d63ac29" exitCode=0
Nov 26 22:57:32 crc kubenswrapper[4903]: I1126 22:57:32.317416 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64" event={"ID":"f8164f4b-1f48-4f38-810b-3a3b636c48ed","Type":"ContainerDied","Data":"29133447e654daa48baf027bc729306f6d98a8f5c9a978e5ca0588fa2d63ac29"}
Nov 26 22:57:33 crc kubenswrapper[4903]: I1126 22:57:33.071119 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-wsb64"]
Nov 26 22:57:33 crc kubenswrapper[4903]: I1126 22:57:33.093593 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-wsb64"]
Nov 26 22:57:33 crc kubenswrapper[4903]: I1126 22:57:33.914063 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:33 crc kubenswrapper[4903]: I1126 22:57:33.999443 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key\") pod \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") "
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.000037 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6lb2\" (UniqueName: \"kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2\") pod \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") "
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.000363 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory\") pod \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\" (UID: \"f8164f4b-1f48-4f38-810b-3a3b636c48ed\") "
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.005732 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2" (OuterVolumeSpecName: "kube-api-access-l6lb2") pod "f8164f4b-1f48-4f38-810b-3a3b636c48ed" (UID: "f8164f4b-1f48-4f38-810b-3a3b636c48ed"). InnerVolumeSpecName "kube-api-access-l6lb2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.031281 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f8164f4b-1f48-4f38-810b-3a3b636c48ed" (UID: "f8164f4b-1f48-4f38-810b-3a3b636c48ed"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.033450 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory" (OuterVolumeSpecName: "inventory") pod "f8164f4b-1f48-4f38-810b-3a3b636c48ed" (UID: "f8164f4b-1f48-4f38-810b-3a3b636c48ed"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.045806 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4049fe04-7d20-41b8-b38c-9c0b39144fda" path="/var/lib/kubelet/pods/4049fe04-7d20-41b8-b38c-9c0b39144fda/volumes"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.105310 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6lb2\" (UniqueName: \"kubernetes.io/projected/f8164f4b-1f48-4f38-810b-3a3b636c48ed-kube-api-access-l6lb2\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.105359 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.105380 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f8164f4b-1f48-4f38-810b-3a3b636c48ed-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.350599 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64" event={"ID":"f8164f4b-1f48-4f38-810b-3a3b636c48ed","Type":"ContainerDied","Data":"501c00eaeb75554399fd7aaff724f193c20ba598642cb743f1893e5278758661"}
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.350990 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="501c00eaeb75554399fd7aaff724f193c20ba598642cb743f1893e5278758661"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.350739 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-sgr64"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.442517 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"]
Nov 26 22:57:34 crc kubenswrapper[4903]: E1126 22:57:34.443442 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8164f4b-1f48-4f38-810b-3a3b636c48ed" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.443480 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8164f4b-1f48-4f38-810b-3a3b636c48ed" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.444019 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8164f4b-1f48-4f38-810b-3a3b636c48ed" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.445583 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.448382 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.448821 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.449109 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.449387 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.456218 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"]
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.515301 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.515379 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.515447 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnhfm\" (UniqueName: \"kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.618151 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnhfm\" (UniqueName: \"kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.618361 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.618432 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.623762 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.634278 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.638907 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnhfm\" (UniqueName: \"kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:34 crc kubenswrapper[4903]: I1126 22:57:34.771643 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:35 crc kubenswrapper[4903]: I1126 22:57:35.429649 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"]
Nov 26 22:57:35 crc kubenswrapper[4903]: W1126 22:57:35.437000 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf442214b_4e84_4d9a_aa2c_9c3ae673ed4d.slice/crio-11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de WatchSource:0}: Error finding container 11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de: Status 404 returned error can't find the container with id 11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de
Nov 26 22:57:36 crc kubenswrapper[4903]: I1126 22:57:36.383246 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6" event={"ID":"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d","Type":"ContainerStarted","Data":"4484dac766831b7ac1d388c893eccc99c2433be377a255c2c21baad3439077d3"}
Nov 26 22:57:36 crc kubenswrapper[4903]: I1126 22:57:36.383639 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6" event={"ID":"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d","Type":"ContainerStarted","Data":"11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de"}
Nov 26 22:57:36 crc kubenswrapper[4903]: I1126 22:57:36.412450 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6" podStartSLOduration=1.891854439 podStartE2EDuration="2.412431969s" podCreationTimestamp="2025-11-26 22:57:34 +0000 UTC" firstStartedPulling="2025-11-26 22:57:35.439920898 +0000 UTC m=+2184.130155818" lastFinishedPulling="2025-11-26 22:57:35.960498408 +0000 UTC m=+2184.650733348" observedRunningTime="2025-11-26 22:57:36.403242484 +0000 UTC m=+2185.093477414" watchObservedRunningTime="2025-11-26 22:57:36.412431969 +0000 UTC m=+2185.102666879"
Nov 26 22:57:47 crc kubenswrapper[4903]: I1126 22:57:47.560960 4903 generic.go:334] "Generic (PLEG): container finished" podID="f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" containerID="4484dac766831b7ac1d388c893eccc99c2433be377a255c2c21baad3439077d3" exitCode=0
Nov 26 22:57:47 crc kubenswrapper[4903]: I1126 22:57:47.561076 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6" event={"ID":"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d","Type":"ContainerDied","Data":"4484dac766831b7ac1d388c893eccc99c2433be377a255c2c21baad3439077d3"}
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.206101 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.294098 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key\") pod \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") "
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.294404 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory\") pod \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") "
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.294507 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnhfm\" (UniqueName: \"kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm\") pod \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\" (UID: \"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d\") "
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.301452 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm" (OuterVolumeSpecName: "kube-api-access-dnhfm") pod "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" (UID: "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d"). InnerVolumeSpecName "kube-api-access-dnhfm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.336833 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" (UID: "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.340142 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory" (OuterVolumeSpecName: "inventory") pod "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" (UID: "f442214b-4e84-4d9a-aa2c-9c3ae673ed4d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.397799 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.397850 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.397871 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnhfm\" (UniqueName: \"kubernetes.io/projected/f442214b-4e84-4d9a-aa2c-9c3ae673ed4d-kube-api-access-dnhfm\") on node \"crc\" DevicePath \"\""
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.598601 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6" event={"ID":"f442214b-4e84-4d9a-aa2c-9c3ae673ed4d","Type":"ContainerDied","Data":"11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de"}
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.598685 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11ef57f3b672d3011c016a6ece4afb655881ac45de349345ab0916eeffda48de"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.598688 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.733131 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6"]
Nov 26 22:57:49 crc kubenswrapper[4903]: E1126 22:57:49.733734 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.733760 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.734108 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f442214b-4e84-4d9a-aa2c-9c3ae673ed4d" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.735170 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.740130 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.740429 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.740602 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.740836 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.740909 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.741031 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.741083 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.741168 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.741282 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.781148 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6"] Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.806981 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807309 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807362 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807401 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807534 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807558 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807592 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807622 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807656 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807685 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807727 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-6kltx\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807799 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807831 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807886 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807909 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.807935 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.909950 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910009 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910073 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910133 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910205 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910250 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910304 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910351 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910412 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910436 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910467 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910531 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910571 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910601 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.910626 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kltx\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.916744 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.917203 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.918218 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.918476 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.918647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.920578 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.920717 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.920878 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: 
\"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.920998 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.922450 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.922882 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.923286 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.927208 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.932242 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.934581 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:49 crc kubenswrapper[4903]: I1126 22:57:49.936450 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6kltx\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:50 crc kubenswrapper[4903]: I1126 22:57:50.117670 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:57:50 crc kubenswrapper[4903]: I1126 22:57:50.734912 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6"] Nov 26 22:57:51 crc kubenswrapper[4903]: I1126 22:57:51.623039 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" event={"ID":"ab43bf25-eae9-472d-80d9-0e91478c8302","Type":"ContainerStarted","Data":"0a748438479814e69b3b0874dc52a8589721b80922659559c8be6755c5391746"} Nov 26 22:57:52 crc kubenswrapper[4903]: I1126 22:57:52.639262 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" event={"ID":"ab43bf25-eae9-472d-80d9-0e91478c8302","Type":"ContainerStarted","Data":"ba1e6258d177e25d7c2e7173ad3a70516ca229b1ad7dc9e19ef2b13af539fd02"} Nov 26 22:57:52 crc kubenswrapper[4903]: I1126 22:57:52.664347 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" podStartSLOduration=2.891203464 podStartE2EDuration="3.66433086s" podCreationTimestamp="2025-11-26 22:57:49 +0000 UTC" firstStartedPulling="2025-11-26 22:57:50.748922528 +0000 UTC m=+2199.439157458" lastFinishedPulling="2025-11-26 22:57:51.522049944 +0000 UTC m=+2200.212284854" observedRunningTime="2025-11-26 22:57:52.657316663 +0000 UTC m=+2201.347551573" watchObservedRunningTime="2025-11-26 22:57:52.66433086 +0000 UTC m=+2201.354565770" Nov 26 22:58:01 crc kubenswrapper[4903]: I1126 22:58:01.981676 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 22:58:01 crc kubenswrapper[4903]: I1126 22:58:01.981756 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 22:58:01 crc kubenswrapper[4903]: I1126 22:58:01.981809 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 22:58:01 crc kubenswrapper[4903]: I1126 22:58:01.982874 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 22:58:01 crc kubenswrapper[4903]: I1126 22:58:01.982962 4903 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc" gracePeriod=600 Nov 26 22:58:02 crc kubenswrapper[4903]: I1126 22:58:02.782379 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc" exitCode=0 Nov 26 22:58:02 crc kubenswrapper[4903]: I1126 22:58:02.782462 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc"} Nov 26 22:58:02 crc kubenswrapper[4903]: I1126 22:58:02.782977 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"} Nov 26 22:58:02 crc kubenswrapper[4903]: I1126 22:58:02.783005 4903 scope.go:117] "RemoveContainer" containerID="5225d111a6887c6aff9adea34e52808b691574abede178f64ce796508df9a2c9" Nov 26 22:58:16 crc kubenswrapper[4903]: I1126 22:58:16.082178 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-cdw9p"] Nov 26 22:58:16 crc kubenswrapper[4903]: I1126 22:58:16.106491 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-cdw9p"] Nov 26 22:58:18 crc kubenswrapper[4903]: I1126 22:58:18.045872 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1307be14-ef92-4ac2-97c5-2851e1871a45" path="/var/lib/kubelet/pods/1307be14-ef92-4ac2-97c5-2851e1871a45/volumes" Nov 26 22:58:21 crc kubenswrapper[4903]: I1126 22:58:21.434775 4903 scope.go:117] "RemoveContainer" containerID="f9f34976525967f8eee08097ab70f55494bcd7244ed8ad63d8b1bb870e46197d" Nov 26 22:58:21 crc kubenswrapper[4903]: I1126 22:58:21.503746 4903 scope.go:117] "RemoveContainer" containerID="36775ec553d738035ad20c1f2e715106486713e54f3914df078813de829f61b9" Nov 26 22:58:47 crc kubenswrapper[4903]: I1126 22:58:47.461365 4903 generic.go:334] "Generic (PLEG): container finished" podID="ab43bf25-eae9-472d-80d9-0e91478c8302" containerID="ba1e6258d177e25d7c2e7173ad3a70516ca229b1ad7dc9e19ef2b13af539fd02" exitCode=0 Nov 26 22:58:47 crc kubenswrapper[4903]: I1126 22:58:47.461466 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" event={"ID":"ab43bf25-eae9-472d-80d9-0e91478c8302","Type":"ContainerDied","Data":"ba1e6258d177e25d7c2e7173ad3a70516ca229b1ad7dc9e19ef2b13af539fd02"} Nov 26 22:58:48 crc kubenswrapper[4903]: I1126 22:58:48.964350 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.114418 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115460 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115511 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115575 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115603 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115665 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115707 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115727 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115792 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115847 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115880 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115924 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115947 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kltx\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.115995 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.116025 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.116069 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle\") pod \"ab43bf25-eae9-472d-80d9-0e91478c8302\" (UID: \"ab43bf25-eae9-472d-80d9-0e91478c8302\") " Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.152525 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.152614 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.166165 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.166507 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.167323 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.167657 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.167885 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx" (OuterVolumeSpecName: "kube-api-access-6kltx") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "kube-api-access-6kltx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.169815 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.169863 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.173890 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.178080 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.190223 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.204093 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218819 4903 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218848 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218858 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218869 4903 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218882 4903 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218891 4903 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218900 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218924 4903 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218932 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218942 4903 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218951 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kltx\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-kube-api-access-6kltx\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218961 4903 reconciler_common.go:293] "Volume 
detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.218969 4903 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.219783 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.223205 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.237728 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory" (OuterVolumeSpecName: "inventory") pod "ab43bf25-eae9-472d-80d9-0e91478c8302" (UID: "ab43bf25-eae9-472d-80d9-0e91478c8302"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.320806 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.321045 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ab43bf25-eae9-472d-80d9-0e91478c8302-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.321057 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab43bf25-eae9-472d-80d9-0e91478c8302-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.487685 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" event={"ID":"ab43bf25-eae9-472d-80d9-0e91478c8302","Type":"ContainerDied","Data":"0a748438479814e69b3b0874dc52a8589721b80922659559c8be6755c5391746"} Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.487755 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a748438479814e69b3b0874dc52a8589721b80922659559c8be6755c5391746" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.487792 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.613615 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk"] Nov 26 22:58:49 crc kubenswrapper[4903]: E1126 22:58:49.614320 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab43bf25-eae9-472d-80d9-0e91478c8302" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.614354 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab43bf25-eae9-472d-80d9-0e91478c8302" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.614907 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab43bf25-eae9-472d-80d9-0e91478c8302" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.616216 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.618537 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.619577 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.619720 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.620896 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.621140 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.635266 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk"] Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.732155 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.732279 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxkkh\" (UniqueName: \"kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.732324 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.732674 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.732735 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.835617 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.835802 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxkkh\" (UniqueName: \"kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.835913 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.836205 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.836274 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.837104 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 
22:58:49.842047 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.845547 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.851036 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.860348 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxkkh\" (UniqueName: \"kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wcwtk\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:49 crc kubenswrapper[4903]: I1126 22:58:49.934018 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 22:58:50 crc kubenswrapper[4903]: I1126 22:58:50.568311 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 22:58:50 crc kubenswrapper[4903]: I1126 22:58:50.575577 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk"] Nov 26 22:58:51 crc kubenswrapper[4903]: I1126 22:58:51.528215 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" event={"ID":"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c","Type":"ContainerStarted","Data":"ce6f84ba7afbd469300e4e27e2521e8cf7bfc00d20878240d4ae42774c921562"} Nov 26 22:58:52 crc kubenswrapper[4903]: I1126 22:58:52.538483 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" event={"ID":"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c","Type":"ContainerStarted","Data":"3300f505b30d5c4b49d4469c876cfe48b1cd351709d1eefeb21c107c2cc1422d"} Nov 26 22:58:52 crc kubenswrapper[4903]: I1126 22:58:52.567774 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" podStartSLOduration=2.811209886 podStartE2EDuration="3.567757039s" podCreationTimestamp="2025-11-26 22:58:49 +0000 UTC" firstStartedPulling="2025-11-26 22:58:50.568039719 +0000 UTC m=+2259.258274639" lastFinishedPulling="2025-11-26 22:58:51.324586852 +0000 UTC m=+2260.014821792" observedRunningTime="2025-11-26 22:58:52.563014613 +0000 UTC m=+2261.253249523" watchObservedRunningTime="2025-11-26 22:58:52.567757039 +0000 UTC m=+2261.257991949" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.575519 4903 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.580751 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.607764 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.607902 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67jtv\" (UniqueName: \"kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.607997 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.626401 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.710251 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.710678 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67jtv\" (UniqueName: \"kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.710784 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.710901 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.711422 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content\") pod \"certified-operators-7tmgb\" 
(UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.731457 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67jtv\" (UniqueName: \"kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv\") pod \"certified-operators-7tmgb\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:06 crc kubenswrapper[4903]: I1126 22:59:06.920044 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:07 crc kubenswrapper[4903]: W1126 22:59:07.500209 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0f6f421_9556_4066_b38c_76bf6ab640da.slice/crio-31770516a64f40c56de4f4def533a8bfb1783fc2099d265c57b484b8e0fb443b WatchSource:0}: Error finding container 31770516a64f40c56de4f4def533a8bfb1783fc2099d265c57b484b8e0fb443b: Status 404 returned error can't find the container with id 31770516a64f40c56de4f4def533a8bfb1783fc2099d265c57b484b8e0fb443b Nov 26 22:59:07 crc kubenswrapper[4903]: I1126 22:59:07.510921 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:07 crc kubenswrapper[4903]: I1126 22:59:07.740956 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerStarted","Data":"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558"} Nov 26 22:59:07 crc kubenswrapper[4903]: I1126 22:59:07.741217 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerStarted","Data":"31770516a64f40c56de4f4def533a8bfb1783fc2099d265c57b484b8e0fb443b"} Nov 26 22:59:08 crc kubenswrapper[4903]: I1126 22:59:08.752631 4903 generic.go:334] "Generic (PLEG): container finished" podID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerID="116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558" exitCode=0 Nov 26 22:59:08 crc kubenswrapper[4903]: I1126 22:59:08.752701 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerDied","Data":"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558"} Nov 26 22:59:09 crc kubenswrapper[4903]: I1126 22:59:09.770293 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerStarted","Data":"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250"} Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.160596 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.168030 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.183991 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.208788 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.208904 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.209144 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhklj\" (UniqueName: \"kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.311358 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhklj\" (UniqueName: \"kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.311487 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.311531 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.311953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.312223 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.335039 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bhklj\" (UniqueName: \"kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj\") pod \"redhat-marketplace-dpkrz\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:10 crc kubenswrapper[4903]: I1126 22:59:10.489305 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:11 crc kubenswrapper[4903]: W1126 22:59:11.030809 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f8786fb_a534_48a4_b2a5_9674f2fd5d46.slice/crio-d95b6d90aa90a0e0bf399303b2c4b97d5698d033e5304e45665120db543831ef WatchSource:0}: Error finding container d95b6d90aa90a0e0bf399303b2c4b97d5698d033e5304e45665120db543831ef: Status 404 returned error can't find the container with id d95b6d90aa90a0e0bf399303b2c4b97d5698d033e5304e45665120db543831ef Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.031018 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.797038 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerID="e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d" exitCode=0 Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.797170 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerDied","Data":"e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d"} Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.797283 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerStarted","Data":"d95b6d90aa90a0e0bf399303b2c4b97d5698d033e5304e45665120db543831ef"} Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.800003 4903 generic.go:334] "Generic (PLEG): container finished" podID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerID="3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250" exitCode=0 Nov 26 22:59:11 crc kubenswrapper[4903]: I1126 22:59:11.800050 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerDied","Data":"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250"} Nov 26 22:59:12 crc kubenswrapper[4903]: I1126 22:59:12.818420 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerStarted","Data":"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2"} Nov 26 22:59:12 crc kubenswrapper[4903]: I1126 22:59:12.852511 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7tmgb" podStartSLOduration=3.303430205 podStartE2EDuration="6.852488317s" podCreationTimestamp="2025-11-26 22:59:06 +0000 UTC" firstStartedPulling="2025-11-26 22:59:08.754732895 +0000 UTC m=+2277.444967805" lastFinishedPulling="2025-11-26 22:59:12.303790987 +0000 UTC m=+2280.994025917" observedRunningTime="2025-11-26 22:59:12.843662701 +0000 UTC m=+2281.533897611" 
watchObservedRunningTime="2025-11-26 22:59:12.852488317 +0000 UTC m=+2281.542723237" Nov 26 22:59:13 crc kubenswrapper[4903]: I1126 22:59:13.835778 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerID="5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b" exitCode=0 Nov 26 22:59:13 crc kubenswrapper[4903]: I1126 22:59:13.835812 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerDied","Data":"5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b"} Nov 26 22:59:15 crc kubenswrapper[4903]: I1126 22:59:15.867523 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerStarted","Data":"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d"} Nov 26 22:59:15 crc kubenswrapper[4903]: I1126 22:59:15.926178 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dpkrz" podStartSLOduration=3.12938984 podStartE2EDuration="5.926145752s" podCreationTimestamp="2025-11-26 22:59:10 +0000 UTC" firstStartedPulling="2025-11-26 22:59:11.799240663 +0000 UTC m=+2280.489475583" lastFinishedPulling="2025-11-26 22:59:14.595996575 +0000 UTC m=+2283.286231495" observedRunningTime="2025-11-26 22:59:15.894132799 +0000 UTC m=+2284.584367749" watchObservedRunningTime="2025-11-26 22:59:15.926145752 +0000 UTC m=+2284.616380672" Nov 26 22:59:16 crc kubenswrapper[4903]: I1126 22:59:16.920961 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:16 crc kubenswrapper[4903]: I1126 22:59:16.921406 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:17 crc kubenswrapper[4903]: I1126 22:59:17.019748 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:18 crc kubenswrapper[4903]: I1126 22:59:18.018379 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:18 crc kubenswrapper[4903]: I1126 22:59:18.563996 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:19 crc kubenswrapper[4903]: I1126 22:59:19.929469 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7tmgb" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="registry-server" containerID="cri-o://e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2" gracePeriod=2 Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.480659 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.490268 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.490329 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.559743 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.595022 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities\") pod \"b0f6f421-9556-4066-b38c-76bf6ab640da\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.595206 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content\") pod \"b0f6f421-9556-4066-b38c-76bf6ab640da\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.595268 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67jtv\" (UniqueName: \"kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv\") pod \"b0f6f421-9556-4066-b38c-76bf6ab640da\" (UID: \"b0f6f421-9556-4066-b38c-76bf6ab640da\") " Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.596576 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities" (OuterVolumeSpecName: "utilities") pod "b0f6f421-9556-4066-b38c-76bf6ab640da" (UID: "b0f6f421-9556-4066-b38c-76bf6ab640da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.602871 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv" (OuterVolumeSpecName: "kube-api-access-67jtv") pod "b0f6f421-9556-4066-b38c-76bf6ab640da" (UID: "b0f6f421-9556-4066-b38c-76bf6ab640da"). InnerVolumeSpecName "kube-api-access-67jtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.657982 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0f6f421-9556-4066-b38c-76bf6ab640da" (UID: "b0f6f421-9556-4066-b38c-76bf6ab640da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.698472 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.698520 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0f6f421-9556-4066-b38c-76bf6ab640da-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.698544 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67jtv\" (UniqueName: \"kubernetes.io/projected/b0f6f421-9556-4066-b38c-76bf6ab640da-kube-api-access-67jtv\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.944988 4903 generic.go:334] "Generic (PLEG): container finished" podID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerID="e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2" exitCode=0 Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.945096 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerDied","Data":"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2"} Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.945145 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7tmgb" event={"ID":"b0f6f421-9556-4066-b38c-76bf6ab640da","Type":"ContainerDied","Data":"31770516a64f40c56de4f4def533a8bfb1783fc2099d265c57b484b8e0fb443b"} Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.945177 4903 scope.go:117] "RemoveContainer" containerID="e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.946172 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7tmgb" Nov 26 22:59:20 crc kubenswrapper[4903]: I1126 22:59:20.996122 4903 scope.go:117] "RemoveContainer" containerID="3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.008658 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.014041 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.032651 4903 scope.go:117] "RemoveContainer" containerID="116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.038921 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7tmgb"] Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.082834 4903 scope.go:117] "RemoveContainer" containerID="e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2" Nov 26 22:59:21 crc kubenswrapper[4903]: E1126 22:59:21.083344 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2\": container with ID starting with e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2 not found: ID does not exist" containerID="e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.083387 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2"} err="failed to get container status \"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2\": rpc error: code = NotFound desc = could not find container \"e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2\": container with ID starting with e5a77917db30ded3b7c7368affe5b4da36c92e1f3e42939915575b3064a9a6e2 not found: ID does not exist" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.083419 4903 scope.go:117] "RemoveContainer" containerID="3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250" Nov 26 22:59:21 crc kubenswrapper[4903]: E1126 22:59:21.084040 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250\": container with ID starting with 3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250 not found: ID does not exist" containerID="3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.084224 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250"} err="failed to get container status \"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250\": rpc error: code = NotFound desc = could not find container \"3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250\": container with ID starting with 3f698b254c35a8c9898c39d637d39c9e4b8fd8bae4746c0d43a61a640601b250 not found: ID does not exist" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.084265 4903 scope.go:117] "RemoveContainer" 
containerID="116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558" Nov 26 22:59:21 crc kubenswrapper[4903]: E1126 22:59:21.084649 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558\": container with ID starting with 116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558 not found: ID does not exist" containerID="116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558" Nov 26 22:59:21 crc kubenswrapper[4903]: I1126 22:59:21.084684 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558"} err="failed to get container status \"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558\": rpc error: code = NotFound desc = could not find container \"116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558\": container with ID starting with 116df4ecad14dc7ada50bd2872a609b5f99b81f852f4113f9f69f6e9b4524558 not found: ID does not exist" Nov 26 22:59:22 crc kubenswrapper[4903]: I1126 22:59:22.053296 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" path="/var/lib/kubelet/pods/b0f6f421-9556-4066-b38c-76bf6ab640da/volumes" Nov 26 22:59:22 crc kubenswrapper[4903]: I1126 22:59:22.954078 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:22 crc kubenswrapper[4903]: I1126 22:59:22.971560 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dpkrz" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="registry-server" containerID="cri-o://5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d" gracePeriod=2 Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.614855 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.683607 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities\") pod \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.683788 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content\") pod \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.683812 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhklj\" (UniqueName: \"kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj\") pod \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\" (UID: \"3f8786fb-a534-48a4-b2a5-9674f2fd5d46\") " Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.685657 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities" (OuterVolumeSpecName: "utilities") pod "3f8786fb-a534-48a4-b2a5-9674f2fd5d46" (UID: "3f8786fb-a534-48a4-b2a5-9674f2fd5d46"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.690828 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj" (OuterVolumeSpecName: "kube-api-access-bhklj") pod "3f8786fb-a534-48a4-b2a5-9674f2fd5d46" (UID: "3f8786fb-a534-48a4-b2a5-9674f2fd5d46"). InnerVolumeSpecName "kube-api-access-bhklj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.702976 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3f8786fb-a534-48a4-b2a5-9674f2fd5d46" (UID: "3f8786fb-a534-48a4-b2a5-9674f2fd5d46"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.785300 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.785331 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.785344 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhklj\" (UniqueName: \"kubernetes.io/projected/3f8786fb-a534-48a4-b2a5-9674f2fd5d46-kube-api-access-bhklj\") on node \"crc\" DevicePath \"\"" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.986167 4903 generic.go:334] "Generic (PLEG): container finished" podID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerID="5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d" exitCode=0 Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.986244 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dpkrz" Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.986289 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerDied","Data":"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d"} Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.986364 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dpkrz" event={"ID":"3f8786fb-a534-48a4-b2a5-9674f2fd5d46","Type":"ContainerDied","Data":"d95b6d90aa90a0e0bf399303b2c4b97d5698d033e5304e45665120db543831ef"} Nov 26 22:59:23 crc kubenswrapper[4903]: I1126 22:59:23.986400 4903 scope.go:117] "RemoveContainer" containerID="5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.034402 4903 scope.go:117] "RemoveContainer" containerID="5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.049381 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.066802 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dpkrz"] Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.087923 4903 scope.go:117] "RemoveContainer" containerID="e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.139243 4903 scope.go:117] "RemoveContainer" containerID="5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d" Nov 26 22:59:24 crc kubenswrapper[4903]: E1126 22:59:24.146618 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d\": container with ID starting with 5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d not found: ID does not exist" containerID="5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.146741 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d"} err="failed to get container status \"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d\": rpc error: code = NotFound desc = could not find container \"5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d\": container with ID starting with 5edadbdee654f61045b1d633f5784c3f0f514e5170035f74962da01589dde42d not found: ID does not exist" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.146787 4903 scope.go:117] "RemoveContainer" containerID="5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b" Nov 26 22:59:24 crc kubenswrapper[4903]: E1126 22:59:24.151824 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b\": container with ID starting with 5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b not found: ID does not exist" containerID="5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.151872 4903 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b"} err="failed to get container status \"5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b\": rpc error: code = NotFound desc = could not find container \"5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b\": container with ID starting with 5c4bcb63f0519b553445e0a7529c09cd3363c474c4569756b45f2bf3ed869d7b not found: ID does not exist" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.151905 4903 scope.go:117] "RemoveContainer" containerID="e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d" Nov 26 22:59:24 crc kubenswrapper[4903]: E1126 22:59:24.152530 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d\": container with ID starting with e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d not found: ID does not exist" containerID="e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d" Nov 26 22:59:24 crc kubenswrapper[4903]: I1126 22:59:24.152579 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d"} err="failed to get container status \"e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d\": rpc error: code = NotFound desc = could not find container \"e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d\": container with ID starting with e7667282ce349d78c2e7952f274194c912a249106ac07c37a8c31a2870113f8d not found: ID does not exist" Nov 26 22:59:26 crc kubenswrapper[4903]: I1126 22:59:26.046986 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" path="/var/lib/kubelet/pods/3f8786fb-a534-48a4-b2a5-9674f2fd5d46/volumes" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.151187 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4"] Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152277 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="extract-content" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152297 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="extract-content" Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152321 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="extract-utilities" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152330 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="extract-utilities" Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152360 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152368 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152395 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152402 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152413 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="extract-utilities" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152420 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="extract-utilities" Nov 26 23:00:00 crc kubenswrapper[4903]: E1126 23:00:00.152438 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="extract-content" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152444 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="extract-content" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152735 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f8786fb-a534-48a4-b2a5-9674f2fd5d46" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.152769 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0f6f421-9556-4066-b38c-76bf6ab640da" containerName="registry-server" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.153912 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.156010 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.156363 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.173648 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4"] Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.272314 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzhkk\" (UniqueName: \"kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.272599 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.272637 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.375103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzhkk\" (UniqueName: \"kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.375277 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.375302 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.376496 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.385548 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.399743 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzhkk\" (UniqueName: \"kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk\") pod \"collect-profiles-29403300-mw9h4\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:00 crc kubenswrapper[4903]: I1126 23:00:00.484962 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:01 crc kubenswrapper[4903]: I1126 23:00:01.035789 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4"] Nov 26 23:00:01 crc kubenswrapper[4903]: I1126 23:00:01.665873 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" event={"ID":"6f993922-4feb-4ed0-9748-58ce116e51a5","Type":"ContainerStarted","Data":"e0de3ff83f1090fad3af95cb5fd4dad641981156ab0872d14e053be32b82b376"} Nov 26 23:00:01 crc kubenswrapper[4903]: I1126 23:00:01.666119 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" event={"ID":"6f993922-4feb-4ed0-9748-58ce116e51a5","Type":"ContainerStarted","Data":"22cf9771895a6aa858a2b6abd1b4f868ac764dec920a57aa95ec36e53c0092c6"} Nov 26 23:00:01 crc kubenswrapper[4903]: I1126 23:00:01.703008 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" podStartSLOduration=1.702989141 podStartE2EDuration="1.702989141s" podCreationTimestamp="2025-11-26 23:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 23:00:01.695996454 +0000 UTC m=+2330.386231364" watchObservedRunningTime="2025-11-26 23:00:01.702989141 +0000 UTC m=+2330.393224051" Nov 26 23:00:02 crc kubenswrapper[4903]: I1126 23:00:02.679082 4903 generic.go:334] "Generic (PLEG): container finished" podID="6f993922-4feb-4ed0-9748-58ce116e51a5" containerID="e0de3ff83f1090fad3af95cb5fd4dad641981156ab0872d14e053be32b82b376" exitCode=0 Nov 26 23:00:02 crc kubenswrapper[4903]: I1126 23:00:02.679177 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" event={"ID":"6f993922-4feb-4ed0-9748-58ce116e51a5","Type":"ContainerDied","Data":"e0de3ff83f1090fad3af95cb5fd4dad641981156ab0872d14e053be32b82b376"} Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.144108 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.271413 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzhkk\" (UniqueName: \"kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk\") pod \"6f993922-4feb-4ed0-9748-58ce116e51a5\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.271488 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume\") pod \"6f993922-4feb-4ed0-9748-58ce116e51a5\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.271588 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume\") pod \"6f993922-4feb-4ed0-9748-58ce116e51a5\" (UID: \"6f993922-4feb-4ed0-9748-58ce116e51a5\") " Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.273549 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume" (OuterVolumeSpecName: "config-volume") pod "6f993922-4feb-4ed0-9748-58ce116e51a5" (UID: "6f993922-4feb-4ed0-9748-58ce116e51a5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.278841 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk" (OuterVolumeSpecName: "kube-api-access-hzhkk") pod "6f993922-4feb-4ed0-9748-58ce116e51a5" (UID: "6f993922-4feb-4ed0-9748-58ce116e51a5"). InnerVolumeSpecName "kube-api-access-hzhkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.283891 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6f993922-4feb-4ed0-9748-58ce116e51a5" (UID: "6f993922-4feb-4ed0-9748-58ce116e51a5"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.374230 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzhkk\" (UniqueName: \"kubernetes.io/projected/6f993922-4feb-4ed0-9748-58ce116e51a5-kube-api-access-hzhkk\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.374266 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6f993922-4feb-4ed0-9748-58ce116e51a5-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.374277 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6f993922-4feb-4ed0-9748-58ce116e51a5-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.719349 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" event={"ID":"6f993922-4feb-4ed0-9748-58ce116e51a5","Type":"ContainerDied","Data":"22cf9771895a6aa858a2b6abd1b4f868ac764dec920a57aa95ec36e53c0092c6"} Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.719401 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22cf9771895a6aa858a2b6abd1b4f868ac764dec920a57aa95ec36e53c0092c6" Nov 26 23:00:04 crc kubenswrapper[4903]: I1126 23:00:04.719449 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4" Nov 26 23:00:05 crc kubenswrapper[4903]: I1126 23:00:05.298790 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"] Nov 26 23:00:05 crc kubenswrapper[4903]: I1126 23:00:05.318183 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403255-kmtfb"] Nov 26 23:00:05 crc kubenswrapper[4903]: I1126 23:00:05.738667 4903 generic.go:334] "Generic (PLEG): container finished" podID="d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" containerID="3300f505b30d5c4b49d4469c876cfe48b1cd351709d1eefeb21c107c2cc1422d" exitCode=0 Nov 26 23:00:05 crc kubenswrapper[4903]: I1126 23:00:05.738777 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" event={"ID":"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c","Type":"ContainerDied","Data":"3300f505b30d5c4b49d4469c876cfe48b1cd351709d1eefeb21c107c2cc1422d"} Nov 26 23:00:06 crc kubenswrapper[4903]: I1126 23:00:06.049797 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6141f47a-9c9b-4ef8-85c9-518f947bff57" path="/var/lib/kubelet/pods/6141f47a-9c9b-4ef8-85c9-518f947bff57/volumes" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.359183 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.461918 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key\") pod \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.461984 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory\") pod \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.462009 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxkkh\" (UniqueName: \"kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh\") pod \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.462223 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle\") pod \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.462325 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0\") pod \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\" (UID: \"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c\") " Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.477520 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" (UID: "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.477888 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh" (OuterVolumeSpecName: "kube-api-access-nxkkh") pod "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" (UID: "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c"). InnerVolumeSpecName "kube-api-access-nxkkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.499277 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" (UID: "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.501736 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory" (OuterVolumeSpecName: "inventory") pod "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" (UID: "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.505230 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" (UID: "d791fd3c-48a9-44e4-85e2-9e0f088ecb6c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.566491 4903 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.566531 4903 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.566540 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.566549 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxkkh\" (UniqueName: \"kubernetes.io/projected/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-kube-api-access-nxkkh\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.566558 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d791fd3c-48a9-44e4-85e2-9e0f088ecb6c-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.766918 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" event={"ID":"d791fd3c-48a9-44e4-85e2-9e0f088ecb6c","Type":"ContainerDied","Data":"ce6f84ba7afbd469300e4e27e2521e8cf7bfc00d20878240d4ae42774c921562"} Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.766957 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce6f84ba7afbd469300e4e27e2521e8cf7bfc00d20878240d4ae42774c921562" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.767001 4903 util.go:48] "No ready sandbox for pod can be found. 
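[annotation] Each pod teardown above repeats a fixed reconciler sequence per volume: an "operationExecutor.UnmountVolume started" entry, a matching "UnmountVolume.TearDown succeeded", and finally "Volume detached" once the reconciler re-runs. A rough audit for unmounts that never completed can diff the two sides. This is a sketch only: it assumes each log entry sits on a single line (which this wrapped dump does not always guarantee), and the regexes only approximate klog's quoting.

    import re, sys
    from collections import Counter

    started, done = Counter(), Counter()
    for line in open(sys.argv[1], errors="replace"):
        # started entries carry the volume name in escaped quotes: \"ssh-key\"
        m = re.search(r'UnmountVolume started for volume \\"([^"\\]+)\\"', line)
        if m:
            started[m.group(1)] += 1
        # TearDown entries repeat it as OuterVolumeSpecName in plain quotes
        m = re.search(r'TearDown succeeded for volume .*\(OuterVolumeSpecName: "([^"]+)"\)', line)
        if m:
            done[m.group(1)] += 1

    for vol, n in started.items():
        if done[vol] != n:
            print(f"{vol}: {n} unmounts started, {done[vol]} completed")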
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wcwtk" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.933156 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb"] Nov 26 23:00:07 crc kubenswrapper[4903]: E1126 23:00:07.934531 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.934566 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 23:00:07 crc kubenswrapper[4903]: E1126 23:00:07.934620 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f993922-4feb-4ed0-9748-58ce116e51a5" containerName="collect-profiles" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.934633 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f993922-4feb-4ed0-9748-58ce116e51a5" containerName="collect-profiles" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.935223 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d791fd3c-48a9-44e4-85e2-9e0f088ecb6c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.935299 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f993922-4feb-4ed0-9748-58ce116e51a5" containerName="collect-profiles" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.936667 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.940543 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.940731 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.940961 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.941621 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.941811 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.941984 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.958879 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb"] Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.977595 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 
23:00:07.977754 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.977831 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.977996 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.978054 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l6kl\" (UniqueName: \"kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:07 crc kubenswrapper[4903]: I1126 23:00:07.978094 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.079924 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.079980 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l6kl\" (UniqueName: \"kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.080044 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" 
(UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.080288 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.080917 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.081282 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.083848 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.084525 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.085631 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.086522 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.087951 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.099127 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l6kl\" (UniqueName: \"kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:08 crc kubenswrapper[4903]: I1126 23:00:08.263802 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:00:09 crc kubenswrapper[4903]: I1126 23:00:09.266765 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb"] Nov 26 23:00:09 crc kubenswrapper[4903]: I1126 23:00:09.844919 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" event={"ID":"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466","Type":"ContainerStarted","Data":"ab521fe0c78527ca309446a2675488fcbf49342eafb8a881dd75ea588054be2b"} Nov 26 23:00:10 crc kubenswrapper[4903]: I1126 23:00:10.862709 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" event={"ID":"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466","Type":"ContainerStarted","Data":"1f62ba5810450c88b207c12427e246f4125ac8b7dd0646cedba14179a80e7432"} Nov 26 23:00:10 crc kubenswrapper[4903]: I1126 23:00:10.894017 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" podStartSLOduration=2.666040301 podStartE2EDuration="3.893991448s" podCreationTimestamp="2025-11-26 23:00:07 +0000 UTC" firstStartedPulling="2025-11-26 23:00:09.269172139 +0000 UTC m=+2337.959407069" lastFinishedPulling="2025-11-26 23:00:10.497123266 +0000 UTC m=+2339.187358216" observedRunningTime="2025-11-26 23:00:10.884104194 +0000 UTC m=+2339.574339134" watchObservedRunningTime="2025-11-26 23:00:10.893991448 +0000 UTC m=+2339.584226388" Nov 26 23:00:21 crc kubenswrapper[4903]: I1126 23:00:21.690570 4903 scope.go:117] "RemoveContainer" containerID="3d9940a8328fc086586c8fd22e96569a735b99934c1eace8d3806fc3305b9844" Nov 26 23:00:31 crc kubenswrapper[4903]: I1126 23:00:31.981662 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:00:31 crc kubenswrapper[4903]: I1126 23:00:31.982535 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 
23:00:41 crc kubenswrapper[4903]: I1126 23:00:41.475245 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="16d5105b-5e4e-4806-a873-a79e1aaccc68" containerName="ovn-northd" probeResult="failure" output="command timed out" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.171607 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29403301-ljvz7"] Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.174929 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.190761 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403301-ljvz7"] Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.258628 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.258701 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.258807 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.258842 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vhw7\" (UniqueName: \"kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.361558 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.361993 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.362212 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " 
pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.362451 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vhw7\" (UniqueName: \"kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.369177 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.369416 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.372615 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.389221 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vhw7\" (UniqueName: \"kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7\") pod \"keystone-cron-29403301-ljvz7\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:00 crc kubenswrapper[4903]: I1126 23:01:00.508567 4903 util.go:30] "No sandbox for pod can be found. 
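[annotation] Pod creation in these entries is the mirror image of the teardowns: "SyncLoop ADD", stale CPU/memory-manager state from just-deleted pods cleared, reflector caches primed for every referenced Secret/ConfigMap, "VerifyControllerAttachedVolume" and "MountVolume.SetUp" per volume, then a fresh sandbox. A small extractor for one pod's lifecycle milestones (illustrative; the timestamp slice assumes each entry starts with the syslog prefix, which the wrapping in this dump sometimes breaks):

    import sys

    MILESTONES = ("SyncLoop ADD", "VerifyControllerAttachedVolume started",
                  "MountVolume.SetUp succeeded", "No sandbox for pod can be found",
                  "ContainerStarted", "Observed pod startup duration")

    log, pod = sys.argv[1], sys.argv[2]   # e.g. keystone-cron-29403301-ljvz7
    for line in open(log, errors="replace"):
        if pod in line:
            for mark in MILESTONES:
                if mark in line:
                    print(line[:15], "|", mark)   # "Nov 26 23:01:00 | SyncLoop ADD"
                    break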
Need to start a new one" pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.076929 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403301-ljvz7"] Nov 26 23:01:01 crc kubenswrapper[4903]: W1126 23:01:01.094783 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe70a483_c763_4980_a995_61d1a6f5573e.slice/crio-e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a WatchSource:0}: Error finding container e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a: Status 404 returned error can't find the container with id e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.820096 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403301-ljvz7" event={"ID":"be70a483-c763-4980-a995-61d1a6f5573e","Type":"ContainerStarted","Data":"27d4a17ebc6a1e39cbb30e6aa94f59ef49d3ccd63ee042625db40b4dfa95d1fc"} Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.820741 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403301-ljvz7" event={"ID":"be70a483-c763-4980-a995-61d1a6f5573e","Type":"ContainerStarted","Data":"e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a"} Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.848756 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29403301-ljvz7" podStartSLOduration=1.848740985 podStartE2EDuration="1.848740985s" podCreationTimestamp="2025-11-26 23:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 23:01:01.842360715 +0000 UTC m=+2390.532595665" watchObservedRunningTime="2025-11-26 23:01:01.848740985 +0000 UTC m=+2390.538975895" Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.981844 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:01:01 crc kubenswrapper[4903]: I1126 23:01:01.981924 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:01:04 crc kubenswrapper[4903]: I1126 23:01:04.859447 4903 generic.go:334] "Generic (PLEG): container finished" podID="be70a483-c763-4980-a995-61d1a6f5573e" containerID="27d4a17ebc6a1e39cbb30e6aa94f59ef49d3ccd63ee042625db40b4dfa95d1fc" exitCode=0 Nov 26 23:01:04 crc kubenswrapper[4903]: I1126 23:01:04.859844 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403301-ljvz7" event={"ID":"be70a483-c763-4980-a995-61d1a6f5573e","Type":"ContainerDied","Data":"27d4a17ebc6a1e39cbb30e6aa94f59ef49d3ccd63ee042625db40b4dfa95d1fc"} Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.419458 4903 util.go:48] "No ready sandbox for pod can be found. 
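[annotation] The pod_startup_latency_tracker entries appear to decompose as: podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp, and podStartSLOduration = E2E minus the image-pull window, with the subtraction done on the kubelet's monotonic clock (the m=+... offsets). Re-deriving the neutron-metadata figures reported earlier in this log:

    # Monotonic offsets and durations copied from the
    # neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb entry above.
    first_pull = 2337.959407069   # firstStartedPulling  m=+...
    last_pull  = 2339.187358216   # lastFinishedPulling  m=+...
    e2e        = 3.893991448      # podStartE2EDuration

    slo = e2e - (last_pull - first_pull)
    print(f"{slo:.9f}")           # 2.666040301, matching podStartSLOduration

For keystone-cron-29403301-ljvz7 both pull timestamps are the zero value (no image was pulled), so its SLO duration equals the E2E duration, 1.848740985s, exactly as logged.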
Need to start a new one" pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.528856 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data\") pod \"be70a483-c763-4980-a995-61d1a6f5573e\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.528971 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys\") pod \"be70a483-c763-4980-a995-61d1a6f5573e\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.528994 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vhw7\" (UniqueName: \"kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7\") pod \"be70a483-c763-4980-a995-61d1a6f5573e\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.529205 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle\") pod \"be70a483-c763-4980-a995-61d1a6f5573e\" (UID: \"be70a483-c763-4980-a995-61d1a6f5573e\") " Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.538068 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7" (OuterVolumeSpecName: "kube-api-access-7vhw7") pod "be70a483-c763-4980-a995-61d1a6f5573e" (UID: "be70a483-c763-4980-a995-61d1a6f5573e"). InnerVolumeSpecName "kube-api-access-7vhw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.541141 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "be70a483-c763-4980-a995-61d1a6f5573e" (UID: "be70a483-c763-4980-a995-61d1a6f5573e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.592785 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be70a483-c763-4980-a995-61d1a6f5573e" (UID: "be70a483-c763-4980-a995-61d1a6f5573e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.614518 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data" (OuterVolumeSpecName: "config-data") pod "be70a483-c763-4980-a995-61d1a6f5573e" (UID: "be70a483-c763-4980-a995-61d1a6f5573e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.632562 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.632599 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.632612 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vhw7\" (UniqueName: \"kubernetes.io/projected/be70a483-c763-4980-a995-61d1a6f5573e-kube-api-access-7vhw7\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.632628 4903 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/be70a483-c763-4980-a995-61d1a6f5573e-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.895849 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403301-ljvz7" event={"ID":"be70a483-c763-4980-a995-61d1a6f5573e","Type":"ContainerDied","Data":"e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a"} Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.895924 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6ae97b22b7b0eb39e454b279ae9b93cdcdb5d8dc6e95d005d60f1eb768bc31a" Nov 26 23:01:06 crc kubenswrapper[4903]: I1126 23:01:06.895949 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403301-ljvz7" Nov 26 23:01:08 crc kubenswrapper[4903]: I1126 23:01:08.925095 4903 generic.go:334] "Generic (PLEG): container finished" podID="c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" containerID="1f62ba5810450c88b207c12427e246f4125ac8b7dd0646cedba14179a80e7432" exitCode=0 Nov 26 23:01:08 crc kubenswrapper[4903]: I1126 23:01:08.925164 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" event={"ID":"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466","Type":"ContainerDied","Data":"1f62ba5810450c88b207c12427e246f4125ac8b7dd0646cedba14179a80e7432"} Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.520559 4903 util.go:48] "No ready sandbox for pod can be found. 
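[annotation] The machine-config-daemon liveness failures recurring above (23:00:31, 23:01:01) are plain HTTP GETs by the kubelet against http://127.0.0.1:8798/health, answered with connection refused; once the failure threshold is crossed the container is killed and restarted, as happens at 23:01:31 further down. The same check can be reproduced from a shell on the node. A minimal sketch, reusing only the URL quoted in the probe output:

    import urllib.request

    URL = "http://127.0.0.1:8798/health"   # endpoint from the probe output above
    try:
        with urllib.request.urlopen(URL, timeout=1):
            print("probe ok")              # kubelet treats any 2xx/3xx as success
    except OSError as exc:                 # "connection refused" (and HTTP errors) land here
        print(f"probe failed: {exc}")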
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656150 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656258 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l6kl\" (UniqueName: \"kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656311 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656347 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656445 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.656560 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0\") pod \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\" (UID: \"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466\") " Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.663127 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl" (OuterVolumeSpecName: "kube-api-access-4l6kl") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "kube-api-access-4l6kl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.664042 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.696143 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.709174 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory" (OuterVolumeSpecName: "inventory") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.713517 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.717521 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" (UID: "c8b7f7a3-07d3-46c8-a5e2-0b08c743d466"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760460 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760640 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l6kl\" (UniqueName: \"kubernetes.io/projected/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-kube-api-access-4l6kl\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760738 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760790 4903 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760838 4903 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.760887 4903 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c8b7f7a3-07d3-46c8-a5e2-0b08c743d466-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.958276 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" event={"ID":"c8b7f7a3-07d3-46c8-a5e2-0b08c743d466","Type":"ContainerDied","Data":"ab521fe0c78527ca309446a2675488fcbf49342eafb8a881dd75ea588054be2b"} Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.958336 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab521fe0c78527ca309446a2675488fcbf49342eafb8a881dd75ea588054be2b" Nov 26 23:01:10 crc kubenswrapper[4903]: I1126 23:01:10.958339 4903 util.go:48] "No ready sandbox for pod can be found. 
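[annotation] Every pod exit here produces the same closing triple: a "ContainerDied" event for the sandbox, a "Container not found in pod's containers" message carrying the same 64-hex ID, and "No ready sandbox for pod can be found. Need to start a new one". For completed Job pods that last message is benign noise: the pod is terminal and no replacement sandbox is actually started. Pairing the two IDs separates sandbox exits from ordinary container exits (a sketch, with the same one-entry-per-line caveat as above):

    import re, sys

    died, missing = {}, set()
    for line in open(sys.argv[1], errors="replace"):
        m = re.search(r'"ContainerDied","Data":"([0-9a-f]{64})"', line)
        if m:
            died[m.group(1)] = line[:15]
        if "Container not found in pod" in line:
            m = re.search(r'containerID="([0-9a-f]{64})"', line)
            if m:
                missing.add(m.group(1))

    for cid, ts in died.items():
        kind = "sandbox" if cid in missing else "container"
        print(ts, cid[:12], kind)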
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.052657 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67"] Nov 26 23:01:11 crc kubenswrapper[4903]: E1126 23:01:11.053315 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.053339 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 23:01:11 crc kubenswrapper[4903]: E1126 23:01:11.053404 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be70a483-c763-4980-a995-61d1a6f5573e" containerName="keystone-cron" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.053419 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="be70a483-c763-4980-a995-61d1a6f5573e" containerName="keystone-cron" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.053731 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="be70a483-c763-4980-a995-61d1a6f5573e" containerName="keystone-cron" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.053769 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8b7f7a3-07d3-46c8-a5e2-0b08c743d466" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.054794 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.060178 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.060481 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.060973 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.061115 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.061136 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.066926 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67"] Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.169329 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw4vx\" (UniqueName: \"kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.169384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.169523 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.169790 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.169875 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.272877 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw4vx\" (UniqueName: \"kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.272952 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.273103 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.273257 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.273301 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.278412 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.278987 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.279072 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.279370 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.291109 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw4vx\" (UniqueName: \"kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-8jt67\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.375073 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" Nov 26 23:01:11 crc kubenswrapper[4903]: I1126 23:01:11.981576 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67"] Nov 26 23:01:12 crc kubenswrapper[4903]: I1126 23:01:12.996937 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" event={"ID":"402f509f-516b-446b-a5e8-f42c6aa65ed7","Type":"ContainerStarted","Data":"ce54f84bebd9fd038ef02a42f5e063c3a987cca7f0d7524826573e4e887cbfde"} Nov 26 23:01:13 crc kubenswrapper[4903]: I1126 23:01:13.123602 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 23:01:14 crc kubenswrapper[4903]: I1126 23:01:14.012996 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" event={"ID":"402f509f-516b-446b-a5e8-f42c6aa65ed7","Type":"ContainerStarted","Data":"635474b2868328de785188fd5109825fe3b13ab5f37546b8dddbc57052b6c1bc"} Nov 26 23:01:14 crc kubenswrapper[4903]: I1126 23:01:14.043620 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" podStartSLOduration=1.917278689 podStartE2EDuration="3.043594566s" podCreationTimestamp="2025-11-26 23:01:11 +0000 UTC" firstStartedPulling="2025-11-26 23:01:11.992294248 +0000 UTC m=+2400.682529198" lastFinishedPulling="2025-11-26 23:01:13.118610125 +0000 UTC m=+2401.808845075" observedRunningTime="2025-11-26 23:01:14.034889504 +0000 UTC m=+2402.725124474" watchObservedRunningTime="2025-11-26 23:01:14.043594566 +0000 UTC m=+2402.733829506" Nov 26 23:01:31 crc kubenswrapper[4903]: I1126 23:01:31.981304 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:01:31 crc kubenswrapper[4903]: I1126 23:01:31.982135 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:01:31 crc kubenswrapper[4903]: I1126 23:01:31.982224 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:01:31 crc kubenswrapper[4903]: I1126 23:01:31.983502 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:01:31 crc kubenswrapper[4903]: I1126 23:01:31.983622 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" gracePeriod=600
Nov 26 23:01:32 crc kubenswrapper[4903]: E1126 23:01:32.111392 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:01:32 crc kubenswrapper[4903]: I1126 23:01:32.275412 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" exitCode=0 Nov 26 23:01:32 crc kubenswrapper[4903]: I1126 23:01:32.275460 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"} Nov 26 23:01:32 crc kubenswrapper[4903]: I1126 23:01:32.275499 4903 scope.go:117] "RemoveContainer" containerID="ee7a8eeaefe3eb0640adea0e74cd3d203e8a5b31c2b6d9bcb6062010871eadbc" Nov 26 23:01:32 crc kubenswrapper[4903]: I1126 23:01:32.276559 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:01:32 crc kubenswrapper[4903]: E1126 23:01:32.277162 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:01:47 crc kubenswrapper[4903]: I1126 23:01:47.028892 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:01:47 crc kubenswrapper[4903]: E1126 23:01:47.029828 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:02:01 crc kubenswrapper[4903]: I1126 23:02:01.029388 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:02:01 crc kubenswrapper[4903]: E1126 23:02:01.030369 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:02:16 crc kubenswrapper[4903]: I1126 23:02:16.029999 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:02:16 crc kubenswrapper[4903]: E1126 23:02:16.031863 4903 pod_workers.go:1301] "Error 
Nov 26 23:02:30 crc kubenswrapper[4903]: I1126 23:02:30.030874 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:02:30 crc kubenswrapper[4903]: E1126 23:02:30.032523 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:02:44 crc kubenswrapper[4903]: I1126 23:02:44.030248 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:02:44 crc kubenswrapper[4903]: E1126 23:02:44.031406 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:02:56 crc kubenswrapper[4903]: I1126 23:02:56.029972 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:02:56 crc kubenswrapper[4903]: E1126 23:02:56.031082 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:03:07 crc kubenswrapper[4903]: I1126 23:03:07.029523 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:03:07 crc kubenswrapper[4903]: E1126 23:03:07.030469 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:03:22 crc kubenswrapper[4903]: I1126 23:03:22.043055 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:03:22 crc kubenswrapper[4903]: E1126 23:03:22.044056 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:03:36 crc kubenswrapper[4903]: I1126 23:03:36.029228 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:03:36 crc kubenswrapper[4903]: E1126 23:03:36.030201 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:03:51 crc kubenswrapper[4903]: I1126 23:03:51.029522 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:03:51 crc kubenswrapper[4903]: E1126 23:03:51.030651 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:04:03 crc kubenswrapper[4903]: I1126 23:04:03.028036 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:04:03 crc kubenswrapper[4903]: E1126 23:04:03.028939 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:04:14 crc kubenswrapper[4903]: I1126 23:04:14.029556 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:04:14 crc kubenswrapper[4903]: E1126 23:04:14.030552 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:04:29 crc kubenswrapper[4903]: I1126 23:04:29.029426 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:04:29 crc kubenswrapper[4903]: E1126 23:04:29.030371 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:04:40 crc kubenswrapper[4903]: I1126 23:04:40.029922 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:04:40 crc kubenswrapper[4903]: E1126 23:04:40.031139 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:04:52 crc kubenswrapper[4903]: I1126 23:04:52.038435 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:04:52 crc kubenswrapper[4903]: E1126 23:04:52.039232 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:05 crc kubenswrapper[4903]: I1126 23:05:05.030040 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:05:05 crc kubenswrapper[4903]: E1126 23:05:05.031038 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:18 crc kubenswrapper[4903]: I1126 23:05:18.033064 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:05:18 crc kubenswrapper[4903]: E1126 23:05:18.034202 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:29 crc kubenswrapper[4903]: I1126 23:05:29.029117 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:05:29 crc kubenswrapper[4903]: E1126 23:05:29.031416 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:40 crc kubenswrapper[4903]: I1126 23:05:40.029949 4903 scope.go:117] "RemoveContainer" 
containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:05:40 crc kubenswrapper[4903]: E1126 23:05:40.031120 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:51 crc kubenswrapper[4903]: I1126 23:05:51.029081 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:05:51 crc kubenswrapper[4903]: E1126 23:05:51.031596 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:05:57 crc kubenswrapper[4903]: I1126 23:05:57.586212 4903 generic.go:334] "Generic (PLEG): container finished" podID="402f509f-516b-446b-a5e8-f42c6aa65ed7" containerID="635474b2868328de785188fd5109825fe3b13ab5f37546b8dddbc57052b6c1bc" exitCode=0 Nov 26 23:05:57 crc kubenswrapper[4903]: I1126 23:05:57.586293 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" event={"ID":"402f509f-516b-446b-a5e8-f42c6aa65ed7","Type":"ContainerDied","Data":"635474b2868328de785188fd5109825fe3b13ab5f37546b8dddbc57052b6c1bc"} Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.120624 4903 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.293343 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle\") pod \"402f509f-516b-446b-a5e8-f42c6aa65ed7\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") "
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.293757 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw4vx\" (UniqueName: \"kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx\") pod \"402f509f-516b-446b-a5e8-f42c6aa65ed7\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") "
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.293805 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0\") pod \"402f509f-516b-446b-a5e8-f42c6aa65ed7\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") "
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.293902 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key\") pod \"402f509f-516b-446b-a5e8-f42c6aa65ed7\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") "
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.294007 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory\") pod \"402f509f-516b-446b-a5e8-f42c6aa65ed7\" (UID: \"402f509f-516b-446b-a5e8-f42c6aa65ed7\") "
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.301380 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "402f509f-516b-446b-a5e8-f42c6aa65ed7" (UID: "402f509f-516b-446b-a5e8-f42c6aa65ed7"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.302770 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx" (OuterVolumeSpecName: "kube-api-access-nw4vx") pod "402f509f-516b-446b-a5e8-f42c6aa65ed7" (UID: "402f509f-516b-446b-a5e8-f42c6aa65ed7"). InnerVolumeSpecName "kube-api-access-nw4vx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.348795 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory" (OuterVolumeSpecName: "inventory") pod "402f509f-516b-446b-a5e8-f42c6aa65ed7" (UID: "402f509f-516b-446b-a5e8-f42c6aa65ed7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.351504 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "402f509f-516b-446b-a5e8-f42c6aa65ed7" (UID: "402f509f-516b-446b-a5e8-f42c6aa65ed7"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.353040 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "402f509f-516b-446b-a5e8-f42c6aa65ed7" (UID: "402f509f-516b-446b-a5e8-f42c6aa65ed7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.401092 4903 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.401154 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw4vx\" (UniqueName: \"kubernetes.io/projected/402f509f-516b-446b-a5e8-f42c6aa65ed7-kube-api-access-nw4vx\") on node \"crc\" DevicePath \"\""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.401181 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.401203 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.401224 4903 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/402f509f-516b-446b-a5e8-f42c6aa65ed7-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.638925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67" event={"ID":"402f509f-516b-446b-a5e8-f42c6aa65ed7","Type":"ContainerDied","Data":"ce54f84bebd9fd038ef02a42f5e063c3a987cca7f0d7524826573e4e887cbfde"}
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.638990 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce54f84bebd9fd038ef02a42f5e063c3a987cca7f0d7524826573e4e887cbfde"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.639085 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-8jt67"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.764062 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"]
Nov 26 23:05:59 crc kubenswrapper[4903]: E1126 23:05:59.764818 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="402f509f-516b-446b-a5e8-f42c6aa65ed7" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.764892 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="402f509f-516b-446b-a5e8-f42c6aa65ed7" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.765171 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="402f509f-516b-446b-a5e8-f42c6aa65ed7" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.766101 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.773674 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.774121 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.774991 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.775391 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.775459 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.775584 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.775843 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.777676 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"]
Nov 26 23:05:59 crc kubenswrapper[4903]: E1126 23:05:59.815322 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod402f509f_516b_446b_a5e8_f42c6aa65ed7.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod402f509f_516b_446b_a5e8_f42c6aa65ed7.slice/crio-ce54f84bebd9fd038ef02a42f5e063c3a987cca7f0d7524826573e4e887cbfde\": RecentStats: unable to find data in memory cache]"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827265 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827306 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827333 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827446 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827470 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827534 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827773 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2v6d\" (UniqueName: \"kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827838 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.827882 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
\"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929531 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929670 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929734 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929844 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929870 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.929964 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.930072 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2v6d\" (UniqueName: \"kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: 
\"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.930123 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.931518 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.933385 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.934638 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.936525 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.936758 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.938002 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.938287 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" Nov 26 23:05:59 crc 
Nov 26 23:05:59 crc kubenswrapper[4903]: I1126 23:05:59.958959 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2v6d\" (UniqueName: \"kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r5l7j\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:06:00 crc kubenswrapper[4903]: I1126 23:06:00.087466 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:06:00 crc kubenswrapper[4903]: I1126 23:06:00.685474 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 23:06:00 crc kubenswrapper[4903]: I1126 23:06:00.691311 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"]
Nov 26 23:06:01 crc kubenswrapper[4903]: I1126 23:06:01.691470 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" event={"ID":"be764009-e30d-4394-b38c-83996b86b9e1","Type":"ContainerStarted","Data":"7d254b0db679751672a721dd46075f144e2b08dd454882c32324e2d2df7c6b60"}
Nov 26 23:06:01 crc kubenswrapper[4903]: I1126 23:06:01.699946 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" event={"ID":"be764009-e30d-4394-b38c-83996b86b9e1","Type":"ContainerStarted","Data":"07375581ceb6998d1535baf679999ce4b87e7b7983cf0e0725d4f401a57adeb8"}
Nov 26 23:06:01 crc kubenswrapper[4903]: I1126 23:06:01.724532 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" podStartSLOduration=2.224968573 podStartE2EDuration="2.724511571s" podCreationTimestamp="2025-11-26 23:05:59 +0000 UTC" firstStartedPulling="2025-11-26 23:06:00.685260895 +0000 UTC m=+2689.375495805" lastFinishedPulling="2025-11-26 23:06:01.184803853 +0000 UTC m=+2689.875038803" observedRunningTime="2025-11-26 23:06:01.713536949 +0000 UTC m=+2690.403771859" watchObservedRunningTime="2025-11-26 23:06:01.724511571 +0000 UTC m=+2690.414746481"
Nov 26 23:06:04 crc kubenswrapper[4903]: I1126 23:06:04.028405 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:06:04 crc kubenswrapper[4903]: E1126 23:06:04.029220 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:06:15 crc kubenswrapper[4903]: I1126 23:06:15.028945 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:06:15 crc kubenswrapper[4903]: E1126 23:06:15.029948 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:06:28 crc kubenswrapper[4903]: I1126 23:06:28.029454 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:06:28 crc kubenswrapper[4903]: E1126 23:06:28.030416 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:06:39 crc kubenswrapper[4903]: I1126 23:06:39.029240 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152"
Nov 26 23:06:40 crc kubenswrapper[4903]: I1126 23:06:40.334998 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7"}
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.214931 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.222803 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.236121 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.325558 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.325608 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c949r\" (UniqueName: \"kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.325631 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.427518 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.427570 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c949r\" (UniqueName: \"kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.427591 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.428136 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.428161 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.446179 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c949r\" (UniqueName: \"kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r\") pod \"redhat-operators-2dj28\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") " pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:58 crc kubenswrapper[4903]: I1126 23:06:58.546470 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:06:59 crc kubenswrapper[4903]: I1126 23:06:59.066393 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:06:59 crc kubenswrapper[4903]: I1126 23:06:59.616369 4903 generic.go:334] "Generic (PLEG): container finished" podID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerID="97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276" exitCode=0
Nov 26 23:06:59 crc kubenswrapper[4903]: I1126 23:06:59.616457 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerDied","Data":"97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276"}
Nov 26 23:06:59 crc kubenswrapper[4903]: I1126 23:06:59.616670 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerStarted","Data":"e862dc9073ae6461895320163861811c6f67dc141a368192b14ac51752e19df8"}
Nov 26 23:07:01 crc kubenswrapper[4903]: I1126 23:07:01.646075 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerStarted","Data":"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"}
Nov 26 23:07:04 crc kubenswrapper[4903]: I1126 23:07:04.682963 4903 generic.go:334] "Generic (PLEG): container finished" podID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerID="5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932" exitCode=0
Nov 26 23:07:04 crc kubenswrapper[4903]: I1126 23:07:04.683651 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerDied","Data":"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"}
Nov 26 23:07:06 crc kubenswrapper[4903]: I1126 23:07:06.716094 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerStarted","Data":"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"}
Nov 26 23:07:06 crc kubenswrapper[4903]: I1126 23:07:06.750103 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2dj28" podStartSLOduration=2.483866758 podStartE2EDuration="8.75007854s" podCreationTimestamp="2025-11-26 23:06:58 +0000 UTC" firstStartedPulling="2025-11-26 23:06:59.618160328 +0000 UTC m=+2748.308395248" lastFinishedPulling="2025-11-26 23:07:05.88437208 +0000 UTC m=+2754.574607030" observedRunningTime="2025-11-26 23:07:06.740288608 +0000 UTC m=+2755.430523558" watchObservedRunningTime="2025-11-26 23:07:06.75007854 +0000 UTC m=+2755.440313450"
Nov 26 23:07:08 crc kubenswrapper[4903]: I1126 23:07:08.547084 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:08 crc kubenswrapper[4903]: I1126 23:07:08.547596 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:09 crc kubenswrapper[4903]: I1126 23:07:09.613418 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2dj28" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="registry-server" probeResult="failure" output=<
Nov 26 23:07:09 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 23:07:09 crc kubenswrapper[4903]: >
Nov 26 23:07:18 crc kubenswrapper[4903]: I1126 23:07:18.619871 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:18 crc kubenswrapper[4903]: I1126 23:07:18.696941 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:18 crc kubenswrapper[4903]: I1126 23:07:18.871854 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:07:19 crc kubenswrapper[4903]: I1126 23:07:19.898925 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2dj28" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="registry-server" containerID="cri-o://8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b" gracePeriod=2
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.530163 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.591612 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c949r\" (UniqueName: \"kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r\") pod \"36e82be5-bd71-4f45-bac4-bee4b5864e00\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") "
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.591823 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content\") pod \"36e82be5-bd71-4f45-bac4-bee4b5864e00\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") "
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.592000 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities\") pod \"36e82be5-bd71-4f45-bac4-bee4b5864e00\" (UID: \"36e82be5-bd71-4f45-bac4-bee4b5864e00\") "
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.593026 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities" (OuterVolumeSpecName: "utilities") pod "36e82be5-bd71-4f45-bac4-bee4b5864e00" (UID: "36e82be5-bd71-4f45-bac4-bee4b5864e00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.593911 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.604896 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r" (OuterVolumeSpecName: "kube-api-access-c949r") pod "36e82be5-bd71-4f45-bac4-bee4b5864e00" (UID: "36e82be5-bd71-4f45-bac4-bee4b5864e00"). InnerVolumeSpecName "kube-api-access-c949r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.697169 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c949r\" (UniqueName: \"kubernetes.io/projected/36e82be5-bd71-4f45-bac4-bee4b5864e00-kube-api-access-c949r\") on node \"crc\" DevicePath \"\""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.699253 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36e82be5-bd71-4f45-bac4-bee4b5864e00" (UID: "36e82be5-bd71-4f45-bac4-bee4b5864e00"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.798452 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36e82be5-bd71-4f45-bac4-bee4b5864e00-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.916342 4903 generic.go:334] "Generic (PLEG): container finished" podID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerID="8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b" exitCode=0
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.916387 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerDied","Data":"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"}
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.916437 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2dj28"
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.916459 4903 scope.go:117] "RemoveContainer" containerID="8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.916444 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2dj28" event={"ID":"36e82be5-bd71-4f45-bac4-bee4b5864e00","Type":"ContainerDied","Data":"e862dc9073ae6461895320163861811c6f67dc141a368192b14ac51752e19df8"}
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.958526 4903 scope.go:117] "RemoveContainer" containerID="5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.960314 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.975745 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2dj28"]
Nov 26 23:07:20 crc kubenswrapper[4903]: I1126 23:07:20.995267 4903 scope.go:117] "RemoveContainer" containerID="97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.040649 4903 scope.go:117] "RemoveContainer" containerID="8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"
Nov 26 23:07:21 crc kubenswrapper[4903]: E1126 23:07:21.041431 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b\": container with ID starting with 8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b not found: ID does not exist" containerID="8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.041500 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b"} err="failed to get container status \"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b\": rpc error: code = NotFound desc = could not find container \"8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b\": container with ID starting with 8ab37272de4a8a7af8fe29fceb6d82025bb90a4da6a89b1eb1303997a492088b not found: ID does not exist"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.041552 4903 scope.go:117] "RemoveContainer" containerID="5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"
Nov 26 23:07:21 crc kubenswrapper[4903]: E1126 23:07:21.042002 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932\": container with ID starting with 5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932 not found: ID does not exist" containerID="5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.042055 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932"} err="failed to get container status \"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932\": rpc error: code = NotFound desc = could not find container \"5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932\": container with ID starting with 5c88c1a719f97f0296a76d4a55f9ab3c27ea0ea0e37c262a204ecd00215b1932 not found: ID does not exist"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.042074 4903 scope.go:117] "RemoveContainer" containerID="97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276"
Nov 26 23:07:21 crc kubenswrapper[4903]: E1126 23:07:21.042357 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276\": container with ID starting with 97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276 not found: ID does not exist" containerID="97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276"
Nov 26 23:07:21 crc kubenswrapper[4903]: I1126 23:07:21.042413 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276"} err="failed to get container status \"97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276\": rpc error: code = NotFound desc = could not find container \"97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276\": container with ID starting with 97806e6fa0c4ae1cdceb97735af51e1dfd47bf63b110d96efe067c0c0f0db276 not found: ID does not exist"
Nov 26 23:07:22 crc kubenswrapper[4903]: I1126 23:07:22.043302 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" path="/var/lib/kubelet/pods/36e82be5-bd71-4f45-bac4-bee4b5864e00/volumes"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.932186 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nrxwx"]
Nov 26 23:07:53 crc kubenswrapper[4903]: E1126 23:07:53.933572 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="registry-server"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.933598 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="registry-server"
Nov 26 23:07:53 crc kubenswrapper[4903]: E1126 23:07:53.933617 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="extract-content"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.933628 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="extract-content"
Nov 26 23:07:53 crc kubenswrapper[4903]: E1126 23:07:53.933666 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="extract-utilities"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.933835 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="extract-utilities"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.934331 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="36e82be5-bd71-4f45-bac4-bee4b5864e00" containerName="registry-server"
Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.937341 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrxwx"
Need to start a new one" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:53 crc kubenswrapper[4903]: I1126 23:07:53.974739 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrxwx"] Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.014476 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mjfr\" (UniqueName: \"kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.014558 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.014581 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.115999 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.116063 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.116441 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mjfr\" (UniqueName: \"kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.118461 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.119290 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.140641 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5mjfr\" (UniqueName: \"kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr\") pod \"community-operators-nrxwx\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.273502 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:07:54 crc kubenswrapper[4903]: I1126 23:07:54.810440 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nrxwx"] Nov 26 23:07:55 crc kubenswrapper[4903]: I1126 23:07:55.451947 4903 generic.go:334] "Generic (PLEG): container finished" podID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerID="0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7" exitCode=0 Nov 26 23:07:55 crc kubenswrapper[4903]: I1126 23:07:55.452036 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerDied","Data":"0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7"} Nov 26 23:07:55 crc kubenswrapper[4903]: I1126 23:07:55.452219 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerStarted","Data":"ea0c8c510a0f030843f93373fca23bd85f9817fc2318eabbe971e784601ebc20"} Nov 26 23:07:56 crc kubenswrapper[4903]: I1126 23:07:56.473938 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerStarted","Data":"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"} Nov 26 23:07:57 crc kubenswrapper[4903]: I1126 23:07:57.487523 4903 generic.go:334] "Generic (PLEG): container finished" podID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerID="6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f" exitCode=0 Nov 26 23:07:57 crc kubenswrapper[4903]: I1126 23:07:57.487587 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerDied","Data":"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"} Nov 26 23:07:58 crc kubenswrapper[4903]: I1126 23:07:58.521097 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerStarted","Data":"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb"} Nov 26 23:07:58 crc kubenswrapper[4903]: I1126 23:07:58.561068 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nrxwx" podStartSLOduration=2.974123092 podStartE2EDuration="5.56104599s" podCreationTimestamp="2025-11-26 23:07:53 +0000 UTC" firstStartedPulling="2025-11-26 23:07:55.454435226 +0000 UTC m=+2804.144670136" lastFinishedPulling="2025-11-26 23:07:58.041358124 +0000 UTC m=+2806.731593034" observedRunningTime="2025-11-26 23:07:58.544994523 +0000 UTC m=+2807.235229473" watchObservedRunningTime="2025-11-26 23:07:58.56104599 +0000 UTC m=+2807.251280910" Nov 26 23:08:04 crc kubenswrapper[4903]: I1126 23:08:04.274197 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:08:04 crc kubenswrapper[4903]: I1126 23:08:04.275447 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:08:04 crc kubenswrapper[4903]: I1126 23:08:04.333466 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:08:04 crc kubenswrapper[4903]: I1126 23:08:04.659790 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:08:04 crc kubenswrapper[4903]: I1126 23:08:04.723627 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrxwx"] Nov 26 23:08:06 crc kubenswrapper[4903]: I1126 23:08:06.650336 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nrxwx" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="registry-server" containerID="cri-o://4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb" gracePeriod=2 Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.228955 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nrxwx" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.383442 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content\") pod \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.384173 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mjfr\" (UniqueName: \"kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr\") pod \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.384253 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities\") pod \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\" (UID: \"12fbdd44-86a6-4cb7-9d31-c6d524e93768\") " Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.385289 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities" (OuterVolumeSpecName: "utilities") pod "12fbdd44-86a6-4cb7-9d31-c6d524e93768" (UID: "12fbdd44-86a6-4cb7-9d31-c6d524e93768"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.385645 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.391821 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr" (OuterVolumeSpecName: "kube-api-access-5mjfr") pod "12fbdd44-86a6-4cb7-9d31-c6d524e93768" (UID: "12fbdd44-86a6-4cb7-9d31-c6d524e93768"). InnerVolumeSpecName "kube-api-access-5mjfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.446396 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12fbdd44-86a6-4cb7-9d31-c6d524e93768" (UID: "12fbdd44-86a6-4cb7-9d31-c6d524e93768"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.488282 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12fbdd44-86a6-4cb7-9d31-c6d524e93768-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.488323 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mjfr\" (UniqueName: \"kubernetes.io/projected/12fbdd44-86a6-4cb7-9d31-c6d524e93768-kube-api-access-5mjfr\") on node \"crc\" DevicePath \"\"" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.662620 4903 generic.go:334] "Generic (PLEG): container finished" podID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerID="4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb" exitCode=0 Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.662659 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerDied","Data":"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb"} Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.662683 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nrxwx" event={"ID":"12fbdd44-86a6-4cb7-9d31-c6d524e93768","Type":"ContainerDied","Data":"ea0c8c510a0f030843f93373fca23bd85f9817fc2318eabbe971e784601ebc20"} Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.662719 4903 scope.go:117] "RemoveContainer" containerID="4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb" Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.662842 4903 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.695315 4903 scope.go:117] "RemoveContainer" containerID="6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.697587 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nrxwx"]
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.712535 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nrxwx"]
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.715991 4903 scope.go:117] "RemoveContainer" containerID="0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.769228 4903 scope.go:117] "RemoveContainer" containerID="4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb"
Nov 26 23:08:07 crc kubenswrapper[4903]: E1126 23:08:07.769717 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb\": container with ID starting with 4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb not found: ID does not exist" containerID="4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.769767 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb"} err="failed to get container status \"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb\": rpc error: code = NotFound desc = could not find container \"4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb\": container with ID starting with 4f48f4c2efde5c6a05adfa3287c1adc6eba853a2671ae9089006dda56058a9fb not found: ID does not exist"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.769794 4903 scope.go:117] "RemoveContainer" containerID="6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"
Nov 26 23:08:07 crc kubenswrapper[4903]: E1126 23:08:07.770203 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f\": container with ID starting with 6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f not found: ID does not exist" containerID="6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.770229 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f"} err="failed to get container status \"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f\": rpc error: code = NotFound desc = could not find container \"6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f\": container with ID starting with 6496621844c0082fde867be379b682458cd8f23908f59f31d94b3439f572726f not found: ID does not exist"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.770241 4903 scope.go:117] "RemoveContainer" containerID="0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7"
Nov 26 23:08:07 crc kubenswrapper[4903]: E1126 23:08:07.770463 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7\": container with ID starting with 0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7 not found: ID does not exist" containerID="0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7"
Nov 26 23:08:07 crc kubenswrapper[4903]: I1126 23:08:07.770483 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7"} err="failed to get container status \"0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7\": rpc error: code = NotFound desc = could not find container \"0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7\": container with ID starting with 0ab73a87e47be123db0b6b269fc5ffca06df4782e0a15457d19b33255ce266d7 not found: ID does not exist"
Nov 26 23:08:08 crc kubenswrapper[4903]: I1126 23:08:08.042587 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" path="/var/lib/kubelet/pods/12fbdd44-86a6-4cb7-9d31-c6d524e93768/volumes"
Nov 26 23:09:01 crc kubenswrapper[4903]: I1126 23:09:01.981324 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:09:01 crc kubenswrapper[4903]: I1126 23:09:01.982217 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:09:16 crc kubenswrapper[4903]: I1126 23:09:16.666491 4903 generic.go:334] "Generic (PLEG): container finished" podID="be764009-e30d-4394-b38c-83996b86b9e1" containerID="7d254b0db679751672a721dd46075f144e2b08dd454882c32324e2d2df7c6b60" exitCode=0
Nov 26 23:09:16 crc kubenswrapper[4903]: I1126 23:09:16.667169 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" event={"ID":"be764009-e30d-4394-b38c-83996b86b9e1","Type":"ContainerDied","Data":"7d254b0db679751672a721dd46075f144e2b08dd454882c32324e2d2df7c6b60"}
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.303119 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.393805 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.394699 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.394921 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395070 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395352 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2v6d\" (UniqueName: \"kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395462 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395524 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395625 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-0\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.395741 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle\") pod \"be764009-e30d-4394-b38c-83996b86b9e1\" (UID: \"be764009-e30d-4394-b38c-83996b86b9e1\") "
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.417827 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.418932 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d" (OuterVolumeSpecName: "kube-api-access-m2v6d") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "kube-api-access-m2v6d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.434257 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.442487 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.447078 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.459601 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.461888 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory" (OuterVolumeSpecName: "inventory") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.465307 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.467335 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "be764009-e30d-4394-b38c-83996b86b9e1" (UID: "be764009-e30d-4394-b38c-83996b86b9e1"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.498836 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499096 4903 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499108 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2v6d\" (UniqueName: \"kubernetes.io/projected/be764009-e30d-4394-b38c-83996b86b9e1-kube-api-access-m2v6d\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499117 4903 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499127 4903 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499137 4903 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/be764009-e30d-4394-b38c-83996b86b9e1-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499145 4903 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499153 4903 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.499162 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/be764009-e30d-4394-b38c-83996b86b9e1-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.699282 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.699817 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r5l7j" event={"ID":"be764009-e30d-4394-b38c-83996b86b9e1","Type":"ContainerDied","Data":"07375581ceb6998d1535baf679999ce4b87e7b7983cf0e0725d4f401a57adeb8"}
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.699911 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07375581ceb6998d1535baf679999ce4b87e7b7983cf0e0725d4f401a57adeb8"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.789663 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"]
Nov 26 23:09:18 crc kubenswrapper[4903]: E1126 23:09:18.790136 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="extract-content"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790153 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="extract-content"
Nov 26 23:09:18 crc kubenswrapper[4903]: E1126 23:09:18.790171 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="registry-server"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790178 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="registry-server"
Nov 26 23:09:18 crc kubenswrapper[4903]: E1126 23:09:18.790193 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be764009-e30d-4394-b38c-83996b86b9e1" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790199 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="be764009-e30d-4394-b38c-83996b86b9e1" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:09:18 crc kubenswrapper[4903]: E1126 23:09:18.790214 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="extract-utilities"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790220 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="extract-utilities"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790465 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="12fbdd44-86a6-4cb7-9d31-c6d524e93768" containerName="registry-server"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.790486 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="be764009-e30d-4394-b38c-83996b86b9e1" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.791339 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.793715 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.793744 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.796197 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.796558 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.797163 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.825559 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"]
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.908428 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.908834 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.908907 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7bxh\" (UniqueName: \"kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.909001 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.909352 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.909430 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:18 crc kubenswrapper[4903]: I1126 23:09:18.909560 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011728 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011776 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011818 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011847 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011926 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011948 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7bxh\" (UniqueName: \"kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: 
\"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.011979 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.017732 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.017734 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.017742 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.018679 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.025102 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.032301 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.033258 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7bxh\" (UniqueName: \"kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk\" 
(UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.115507 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:09:19 crc kubenswrapper[4903]: I1126 23:09:19.760250 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk"] Nov 26 23:09:20 crc kubenswrapper[4903]: I1126 23:09:20.718914 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" event={"ID":"78703fde-a3cc-4241-940e-f92a638f8549","Type":"ContainerStarted","Data":"da7bd45da6d3671150fb90d1dac42ee8d11af1d1cc8c1bf83d614aa8730866d9"} Nov 26 23:09:21 crc kubenswrapper[4903]: I1126 23:09:21.740449 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" event={"ID":"78703fde-a3cc-4241-940e-f92a638f8549","Type":"ContainerStarted","Data":"3756a93108af044937cf708039605fd867d13ad48831c3b8022ed4ccff680afc"} Nov 26 23:09:21 crc kubenswrapper[4903]: I1126 23:09:21.795155 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" podStartSLOduration=3.135506303 podStartE2EDuration="3.795128122s" podCreationTimestamp="2025-11-26 23:09:18 +0000 UTC" firstStartedPulling="2025-11-26 23:09:19.767170752 +0000 UTC m=+2888.457405672" lastFinishedPulling="2025-11-26 23:09:20.426792551 +0000 UTC m=+2889.117027491" observedRunningTime="2025-11-26 23:09:21.776119632 +0000 UTC m=+2890.466354582" watchObservedRunningTime="2025-11-26 23:09:21.795128122 +0000 UTC m=+2890.485363052" Nov 26 23:09:31 crc kubenswrapper[4903]: I1126 23:09:31.982603 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:09:31 crc kubenswrapper[4903]: I1126 23:09:31.983210 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.493390 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"] Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.497569 4903 util.go:30] "No sandbox for pod can be found. 
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.514789 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"]
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.653475 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.653750 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.654186 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd6bh\" (UniqueName: \"kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.756012 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.756132 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.756274 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd6bh\" (UniqueName: \"kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.756731 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.756875 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.778828 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd6bh\" (UniqueName: \"kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh\") pod \"redhat-marketplace-rxb2d\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:46 crc kubenswrapper[4903]: I1126 23:09:46.851147 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxb2d"
Nov 26 23:09:47 crc kubenswrapper[4903]: I1126 23:09:47.377108 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"]
Nov 26 23:09:48 crc kubenswrapper[4903]: I1126 23:09:48.092211 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerID="48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2" exitCode=0
Nov 26 23:09:48 crc kubenswrapper[4903]: I1126 23:09:48.092312 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerDied","Data":"48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2"}
Nov 26 23:09:48 crc kubenswrapper[4903]: I1126 23:09:48.092647 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerStarted","Data":"2e24466ea0d32d1a68ae1fd6fdb560de66fa172bc073f9a822429f693dee543b"}
Nov 26 23:09:49 crc kubenswrapper[4903]: I1126 23:09:49.124002 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerStarted","Data":"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65"}
Nov 26 23:09:50 crc kubenswrapper[4903]: I1126 23:09:50.138184 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerID="7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65" exitCode=0
Nov 26 23:09:50 crc kubenswrapper[4903]: I1126 23:09:50.138227 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerDied","Data":"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65"}
Nov 26 23:09:51 crc kubenswrapper[4903]: I1126 23:09:51.159229 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerStarted","Data":"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d"}
Nov 26 23:09:51 crc kubenswrapper[4903]: I1126 23:09:51.187277 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rxb2d" podStartSLOduration=2.59918737 podStartE2EDuration="5.187255174s" podCreationTimestamp="2025-11-26 23:09:46 +0000 UTC" firstStartedPulling="2025-11-26 23:09:48.095083785 +0000 UTC m=+2916.785318685" lastFinishedPulling="2025-11-26 23:09:50.683151589 +0000 UTC m=+2919.373386489" observedRunningTime="2025-11-26 23:09:51.177209215 +0000 UTC m=+2919.867444135" watchObservedRunningTime="2025-11-26 23:09:51.187255174 +0000 UTC m=+2919.877490084"
Nov 26 23:09:56 crc kubenswrapper[4903]: I1126 23:09:56.851590 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rxb2d"
pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:09:56 crc kubenswrapper[4903]: I1126 23:09:56.852120 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:09:56 crc kubenswrapper[4903]: I1126 23:09:56.895963 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:09:57 crc kubenswrapper[4903]: I1126 23:09:57.357128 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:09:57 crc kubenswrapper[4903]: I1126 23:09:57.452129 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"] Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.290397 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rxb2d" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="registry-server" containerID="cri-o://4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d" gracePeriod=2 Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.893361 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.994207 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd6bh\" (UniqueName: \"kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh\") pod \"fcaf3d75-9068-4ad1-b786-afa408c60f43\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.994337 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content\") pod \"fcaf3d75-9068-4ad1-b786-afa408c60f43\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.994406 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities\") pod \"fcaf3d75-9068-4ad1-b786-afa408c60f43\" (UID: \"fcaf3d75-9068-4ad1-b786-afa408c60f43\") " Nov 26 23:09:59 crc kubenswrapper[4903]: I1126 23:09:59.995163 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities" (OuterVolumeSpecName: "utilities") pod "fcaf3d75-9068-4ad1-b786-afa408c60f43" (UID: "fcaf3d75-9068-4ad1-b786-afa408c60f43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.000517 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh" (OuterVolumeSpecName: "kube-api-access-sd6bh") pod "fcaf3d75-9068-4ad1-b786-afa408c60f43" (UID: "fcaf3d75-9068-4ad1-b786-afa408c60f43"). InnerVolumeSpecName "kube-api-access-sd6bh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.014068 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fcaf3d75-9068-4ad1-b786-afa408c60f43" (UID: "fcaf3d75-9068-4ad1-b786-afa408c60f43"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.096620 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.096650 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fcaf3d75-9068-4ad1-b786-afa408c60f43-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.096660 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd6bh\" (UniqueName: \"kubernetes.io/projected/fcaf3d75-9068-4ad1-b786-afa408c60f43-kube-api-access-sd6bh\") on node \"crc\" DevicePath \"\"" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.306051 4903 generic.go:334] "Generic (PLEG): container finished" podID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerID="4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d" exitCode=0 Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.306102 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerDied","Data":"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d"} Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.306190 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rxb2d" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.308136 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rxb2d" event={"ID":"fcaf3d75-9068-4ad1-b786-afa408c60f43","Type":"ContainerDied","Data":"2e24466ea0d32d1a68ae1fd6fdb560de66fa172bc073f9a822429f693dee543b"} Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.308341 4903 scope.go:117] "RemoveContainer" containerID="4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.349956 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"] Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.359511 4903 scope.go:117] "RemoveContainer" containerID="7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.366144 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rxb2d"] Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.395953 4903 scope.go:117] "RemoveContainer" containerID="48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.467596 4903 scope.go:117] "RemoveContainer" containerID="4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d" Nov 26 23:10:00 crc kubenswrapper[4903]: E1126 23:10:00.468968 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d\": container with ID starting with 4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d not found: ID does not exist" containerID="4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.468997 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d"} err="failed to get container status \"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d\": rpc error: code = NotFound desc = could not find container \"4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d\": container with ID starting with 4653c0362ac3741aaeb24fba150148aa0f7406aa1173b7a99837f0a595758d9d not found: ID does not exist" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.469031 4903 scope.go:117] "RemoveContainer" containerID="7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65" Nov 26 23:10:00 crc kubenswrapper[4903]: E1126 23:10:00.469314 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65\": container with ID starting with 7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65 not found: ID does not exist" containerID="7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.469378 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65"} err="failed to get container status \"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65\": rpc error: code = NotFound desc = could not find 
container \"7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65\": container with ID starting with 7a711af485b975d2432212e6a9045ccd5252de913627d7acab5dd0fe1abf1d65 not found: ID does not exist" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.469420 4903 scope.go:117] "RemoveContainer" containerID="48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2" Nov 26 23:10:00 crc kubenswrapper[4903]: E1126 23:10:00.470028 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2\": container with ID starting with 48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2 not found: ID does not exist" containerID="48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2" Nov 26 23:10:00 crc kubenswrapper[4903]: I1126 23:10:00.470052 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2"} err="failed to get container status \"48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2\": rpc error: code = NotFound desc = could not find container \"48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2\": container with ID starting with 48f402ddb6e7db13d4bef4757fd7f255a8d338ec38cf488d0f921c0b18aa12c2 not found: ID does not exist" Nov 26 23:10:01 crc kubenswrapper[4903]: I1126 23:10:01.982043 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:10:01 crc kubenswrapper[4903]: I1126 23:10:01.982420 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:10:01 crc kubenswrapper[4903]: I1126 23:10:01.982488 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:10:01 crc kubenswrapper[4903]: I1126 23:10:01.983222 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:10:01 crc kubenswrapper[4903]: I1126 23:10:01.983299 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7" gracePeriod=600 Nov 26 23:10:02 crc kubenswrapper[4903]: I1126 23:10:02.051100 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" path="/var/lib/kubelet/pods/fcaf3d75-9068-4ad1-b786-afa408c60f43/volumes" Nov 26 23:10:02 crc kubenswrapper[4903]: I1126 23:10:02.343222 4903 
generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7" exitCode=0 Nov 26 23:10:02 crc kubenswrapper[4903]: I1126 23:10:02.343277 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7"} Nov 26 23:10:02 crc kubenswrapper[4903]: I1126 23:10:02.343502 4903 scope.go:117] "RemoveContainer" containerID="e1a4187875e34a3010ba079d7bb64d352d090491c47d19e4115140a9af1b0152" Nov 26 23:10:03 crc kubenswrapper[4903]: I1126 23:10:03.358998 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9"} Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.976161 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:11:59 crc kubenswrapper[4903]: E1126 23:11:59.977507 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="extract-utilities" Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.977529 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="extract-utilities" Nov 26 23:11:59 crc kubenswrapper[4903]: E1126 23:11:59.977570 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="registry-server" Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.977581 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="registry-server" Nov 26 23:11:59 crc kubenswrapper[4903]: E1126 23:11:59.977608 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="extract-content" Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.977619 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="extract-content" Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.978020 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcaf3d75-9068-4ad1-b786-afa408c60f43" containerName="registry-server" Nov 26 23:11:59 crc kubenswrapper[4903]: I1126 23:11:59.980776 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.008455 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.047648 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzfwm\" (UniqueName: \"kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.048200 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.049050 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.151448 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzfwm\" (UniqueName: \"kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.151501 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.151562 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.152049 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.152386 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.178883 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pzfwm\" (UniqueName: \"kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm\") pod \"certified-operators-fbtdw\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.322790 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:00 crc kubenswrapper[4903]: I1126 23:12:00.883301 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:12:01 crc kubenswrapper[4903]: I1126 23:12:01.080842 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerStarted","Data":"1c6c9a45e48679214061cdcb4f6dc7221b0bab5336b0e4814d4d952a7a3a1a4c"} Nov 26 23:12:02 crc kubenswrapper[4903]: I1126 23:12:02.093537 4903 generic.go:334] "Generic (PLEG): container finished" podID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerID="1efcee3ce167d48850cdecb1e6417c19f6c7f0abe7947168abc09aac5c5e59fb" exitCode=0 Nov 26 23:12:02 crc kubenswrapper[4903]: I1126 23:12:02.093833 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerDied","Data":"1efcee3ce167d48850cdecb1e6417c19f6c7f0abe7947168abc09aac5c5e59fb"} Nov 26 23:12:02 crc kubenswrapper[4903]: I1126 23:12:02.096816 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:12:03 crc kubenswrapper[4903]: I1126 23:12:03.105057 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerStarted","Data":"6b48d404bd7a27dee305e2c2c133ad3dde8508ea68d12e2837c7b02c6628e051"} Nov 26 23:12:04 crc kubenswrapper[4903]: I1126 23:12:04.120718 4903 generic.go:334] "Generic (PLEG): container finished" podID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerID="6b48d404bd7a27dee305e2c2c133ad3dde8508ea68d12e2837c7b02c6628e051" exitCode=0 Nov 26 23:12:04 crc kubenswrapper[4903]: I1126 23:12:04.121139 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerDied","Data":"6b48d404bd7a27dee305e2c2c133ad3dde8508ea68d12e2837c7b02c6628e051"} Nov 26 23:12:05 crc kubenswrapper[4903]: I1126 23:12:05.140895 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerStarted","Data":"f752e700a6737bac55f70e5364fb6d3c7c66f743f70ee0e15771a2b994602fcd"} Nov 26 23:12:05 crc kubenswrapper[4903]: I1126 23:12:05.159176 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fbtdw" podStartSLOduration=3.495534462 podStartE2EDuration="6.159161471s" podCreationTimestamp="2025-11-26 23:11:59 +0000 UTC" firstStartedPulling="2025-11-26 23:12:02.096448943 +0000 UTC m=+3050.786683863" lastFinishedPulling="2025-11-26 23:12:04.760075942 +0000 UTC m=+3053.450310872" observedRunningTime="2025-11-26 23:12:05.155612356 +0000 UTC m=+3053.845847336" watchObservedRunningTime="2025-11-26 
23:12:05.159161471 +0000 UTC m=+3053.849396381" Nov 26 23:12:10 crc kubenswrapper[4903]: I1126 23:12:10.328983 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:10 crc kubenswrapper[4903]: I1126 23:12:10.329619 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:10 crc kubenswrapper[4903]: I1126 23:12:10.384258 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:11 crc kubenswrapper[4903]: I1126 23:12:11.268444 4903 generic.go:334] "Generic (PLEG): container finished" podID="78703fde-a3cc-4241-940e-f92a638f8549" containerID="3756a93108af044937cf708039605fd867d13ad48831c3b8022ed4ccff680afc" exitCode=0 Nov 26 23:12:11 crc kubenswrapper[4903]: I1126 23:12:11.268530 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" event={"ID":"78703fde-a3cc-4241-940e-f92a638f8549","Type":"ContainerDied","Data":"3756a93108af044937cf708039605fd867d13ad48831c3b8022ed4ccff680afc"} Nov 26 23:12:11 crc kubenswrapper[4903]: I1126 23:12:11.363500 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:11 crc kubenswrapper[4903]: I1126 23:12:11.443816 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.812904 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.950983 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951138 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951212 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7bxh\" (UniqueName: \"kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951279 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951348 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key\") pod 
\"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951374 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.951447 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle\") pod \"78703fde-a3cc-4241-940e-f92a638f8549\" (UID: \"78703fde-a3cc-4241-940e-f92a638f8549\") " Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.957281 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh" (OuterVolumeSpecName: "kube-api-access-r7bxh") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "kube-api-access-r7bxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.957951 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.983994 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:12 crc kubenswrapper[4903]: I1126 23:12:12.995508 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.019901 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory" (OuterVolumeSpecName: "inventory") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.022658 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). 
InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.022971 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "78703fde-a3cc-4241-940e-f92a638f8549" (UID: "78703fde-a3cc-4241-940e-f92a638f8549"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055031 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055065 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055080 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7bxh\" (UniqueName: \"kubernetes.io/projected/78703fde-a3cc-4241-940e-f92a638f8549-kube-api-access-r7bxh\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055094 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055106 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055118 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.055130 4903 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78703fde-a3cc-4241-940e-f92a638f8549-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.293421 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" event={"ID":"78703fde-a3cc-4241-940e-f92a638f8549","Type":"ContainerDied","Data":"da7bd45da6d3671150fb90d1dac42ee8d11af1d1cc8c1bf83d614aa8730866d9"} Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.293900 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da7bd45da6d3671150fb90d1dac42ee8d11af1d1cc8c1bf83d614aa8730866d9" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.293610 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fbtdw" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="registry-server" containerID="cri-o://f752e700a6737bac55f70e5364fb6d3c7c66f743f70ee0e15771a2b994602fcd" gracePeriod=2 Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 
23:12:13.293482 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.459973 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5"] Nov 26 23:12:13 crc kubenswrapper[4903]: E1126 23:12:13.460493 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78703fde-a3cc-4241-940e-f92a638f8549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.460514 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="78703fde-a3cc-4241-940e-f92a638f8549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.460885 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="78703fde-a3cc-4241-940e-f92a638f8549" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.461874 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.464216 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.465923 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.466131 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.466247 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.473279 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.499663 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5"] Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568238 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568306 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6xlv\" (UniqueName: \"kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568342 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568454 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568518 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568547 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.568570 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670457 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670518 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6xlv\" (UniqueName: \"kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670551 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2\") 
pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670630 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670829 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670870 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.670886 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.674758 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.674890 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.674970 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.675524 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.676091 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.679508 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.687045 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6xlv\" (UniqueName: \"kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:13 crc kubenswrapper[4903]: I1126 23:12:13.795241 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.309405 4903 generic.go:334] "Generic (PLEG): container finished" podID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerID="f752e700a6737bac55f70e5364fb6d3c7c66f743f70ee0e15771a2b994602fcd" exitCode=0 Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.309870 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerDied","Data":"f752e700a6737bac55f70e5364fb6d3c7c66f743f70ee0e15771a2b994602fcd"} Nov 26 23:12:14 crc kubenswrapper[4903]: W1126 23:12:14.433906 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5582dbe8_0a07_4c5f_9054_5e0bc32c2819.slice/crio-e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165 WatchSource:0}: Error finding container e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165: Status 404 returned error can't find the container with id e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165 Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.434397 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5"] Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.487116 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.600882 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content\") pod \"ac153cb0-0d58-47be-8d9e-3743cff62a79\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.601018 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzfwm\" (UniqueName: \"kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm\") pod \"ac153cb0-0d58-47be-8d9e-3743cff62a79\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.601076 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities\") pod \"ac153cb0-0d58-47be-8d9e-3743cff62a79\" (UID: \"ac153cb0-0d58-47be-8d9e-3743cff62a79\") " Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.601887 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities" (OuterVolumeSpecName: "utilities") pod "ac153cb0-0d58-47be-8d9e-3743cff62a79" (UID: "ac153cb0-0d58-47be-8d9e-3743cff62a79"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.608194 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm" (OuterVolumeSpecName: "kube-api-access-pzfwm") pod "ac153cb0-0d58-47be-8d9e-3743cff62a79" (UID: "ac153cb0-0d58-47be-8d9e-3743cff62a79"). InnerVolumeSpecName "kube-api-access-pzfwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.644948 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ac153cb0-0d58-47be-8d9e-3743cff62a79" (UID: "ac153cb0-0d58-47be-8d9e-3743cff62a79"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.704821 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.704881 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzfwm\" (UniqueName: \"kubernetes.io/projected/ac153cb0-0d58-47be-8d9e-3743cff62a79-kube-api-access-pzfwm\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:14 crc kubenswrapper[4903]: I1126 23:12:14.704899 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ac153cb0-0d58-47be-8d9e-3743cff62a79-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.329547 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fbtdw" Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.329587 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fbtdw" event={"ID":"ac153cb0-0d58-47be-8d9e-3743cff62a79","Type":"ContainerDied","Data":"1c6c9a45e48679214061cdcb4f6dc7221b0bab5336b0e4814d4d952a7a3a1a4c"} Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.329641 4903 scope.go:117] "RemoveContainer" containerID="f752e700a6737bac55f70e5364fb6d3c7c66f743f70ee0e15771a2b994602fcd" Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.339259 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" event={"ID":"5582dbe8-0a07-4c5f-9054-5e0bc32c2819","Type":"ContainerStarted","Data":"e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165"} Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.371909 4903 scope.go:117] "RemoveContainer" containerID="6b48d404bd7a27dee305e2c2c133ad3dde8508ea68d12e2837c7b02c6628e051" Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.383088 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.410244 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fbtdw"] Nov 26 23:12:15 crc kubenswrapper[4903]: I1126 23:12:15.417853 4903 scope.go:117] "RemoveContainer" containerID="1efcee3ce167d48850cdecb1e6417c19f6c7f0abe7947168abc09aac5c5e59fb" Nov 26 23:12:16 crc kubenswrapper[4903]: I1126 23:12:16.045003 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" path="/var/lib/kubelet/pods/ac153cb0-0d58-47be-8d9e-3743cff62a79/volumes" Nov 26 23:12:16 crc kubenswrapper[4903]: I1126 23:12:16.367418 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" event={"ID":"5582dbe8-0a07-4c5f-9054-5e0bc32c2819","Type":"ContainerStarted","Data":"4c567b8fc4f973445532d008e2f9e38a9a397c86013ef57a00c331ff7a4b8ae8"} Nov 26 23:12:16 crc kubenswrapper[4903]: I1126 23:12:16.402882 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" podStartSLOduration=2.760190038 podStartE2EDuration="3.402862068s" podCreationTimestamp="2025-11-26 23:12:13 +0000 UTC" firstStartedPulling="2025-11-26 23:12:14.4355487 +0000 UTC m=+3063.125783610" lastFinishedPulling="2025-11-26 23:12:15.07822069 +0000 UTC m=+3063.768455640" observedRunningTime="2025-11-26 23:12:16.390732764 +0000 UTC m=+3065.080967694" watchObservedRunningTime="2025-11-26 23:12:16.402862068 +0000 UTC m=+3065.093096978" Nov 26 23:12:31 crc kubenswrapper[4903]: I1126 23:12:31.981391 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:12:31 crc kubenswrapper[4903]: I1126 23:12:31.981978 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:13:01 crc kubenswrapper[4903]: I1126 23:13:01.996490 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:13:01 crc kubenswrapper[4903]: I1126 23:13:01.997102 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:13:31 crc kubenswrapper[4903]: I1126 23:13:31.980886 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:13:31 crc kubenswrapper[4903]: I1126 23:13:31.981531 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:13:31 crc kubenswrapper[4903]: I1126 23:13:31.981590 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:13:31 crc kubenswrapper[4903]: I1126 23:13:31.982838 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:13:31 crc kubenswrapper[4903]: I1126 23:13:31.982958 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" gracePeriod=600 Nov 26 23:13:32 crc kubenswrapper[4903]: E1126 23:13:32.131090 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:13:32 crc kubenswrapper[4903]: I1126 23:13:32.399914 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" exitCode=0 Nov 26 23:13:32 crc kubenswrapper[4903]: I1126 23:13:32.399976 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" 
event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9"} Nov 26 23:13:32 crc kubenswrapper[4903]: I1126 23:13:32.400028 4903 scope.go:117] "RemoveContainer" containerID="b4f2997ac38c47b9761af8aec04555c21367d1561cc50b6d8dc9ce9b0946fee7" Nov 26 23:13:32 crc kubenswrapper[4903]: I1126 23:13:32.401082 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:13:32 crc kubenswrapper[4903]: E1126 23:13:32.401578 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:13:44 crc kubenswrapper[4903]: I1126 23:13:44.029164 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:13:44 crc kubenswrapper[4903]: E1126 23:13:44.031462 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:13:58 crc kubenswrapper[4903]: I1126 23:13:58.029077 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:13:58 crc kubenswrapper[4903]: E1126 23:13:58.030281 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:14:11 crc kubenswrapper[4903]: I1126 23:14:11.029028 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:14:11 crc kubenswrapper[4903]: E1126 23:14:11.029897 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:14:23 crc kubenswrapper[4903]: I1126 23:14:23.029859 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:14:23 crc kubenswrapper[4903]: E1126 23:14:23.031049 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:14:35 crc kubenswrapper[4903]: I1126 23:14:35.028071 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:14:35 crc kubenswrapper[4903]: E1126 23:14:35.028917 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:14:37 crc kubenswrapper[4903]: I1126 23:14:37.336118 4903 generic.go:334] "Generic (PLEG): container finished" podID="5582dbe8-0a07-4c5f-9054-5e0bc32c2819" containerID="4c567b8fc4f973445532d008e2f9e38a9a397c86013ef57a00c331ff7a4b8ae8" exitCode=0 Nov 26 23:14:37 crc kubenswrapper[4903]: I1126 23:14:37.336190 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" event={"ID":"5582dbe8-0a07-4c5f-9054-5e0bc32c2819","Type":"ContainerDied","Data":"4c567b8fc4f973445532d008e2f9e38a9a397c86013ef57a00c331ff7a4b8ae8"} Nov 26 23:14:38 crc kubenswrapper[4903]: I1126 23:14:38.873756 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042260 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042350 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042420 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042613 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042660 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0\") pod 
\"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042699 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.042836 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6xlv\" (UniqueName: \"kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv\") pod \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\" (UID: \"5582dbe8-0a07-4c5f-9054-5e0bc32c2819\") " Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.075255 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv" (OuterVolumeSpecName: "kube-api-access-s6xlv") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "kube-api-access-s6xlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.075544 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.078890 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.079723 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.096216 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory" (OuterVolumeSpecName: "inventory") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.101679 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.125499 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "5582dbe8-0a07-4c5f-9054-5e0bc32c2819" (UID: "5582dbe8-0a07-4c5f-9054-5e0bc32c2819"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149519 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149552 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149564 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149574 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6xlv\" (UniqueName: \"kubernetes.io/projected/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-kube-api-access-s6xlv\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149583 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149593 4903 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.149605 4903 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5582dbe8-0a07-4c5f-9054-5e0bc32c2819-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.363895 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" event={"ID":"5582dbe8-0a07-4c5f-9054-5e0bc32c2819","Type":"ContainerDied","Data":"e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165"} Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.364202 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e46a779fec79ff640aec1eb616822e85070aa118c2104d3a38137c83ece7a165" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.363975 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.449970 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq"] Nov 26 23:14:39 crc kubenswrapper[4903]: E1126 23:14:39.450440 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5582dbe8-0a07-4c5f-9054-5e0bc32c2819" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450461 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5582dbe8-0a07-4c5f-9054-5e0bc32c2819" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 26 23:14:39 crc kubenswrapper[4903]: E1126 23:14:39.450492 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="registry-server" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450500 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="registry-server" Nov 26 23:14:39 crc kubenswrapper[4903]: E1126 23:14:39.450508 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="extract-content" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450513 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="extract-content" Nov 26 23:14:39 crc kubenswrapper[4903]: E1126 23:14:39.450545 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="extract-utilities" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450551 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="extract-utilities" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450799 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac153cb0-0d58-47be-8d9e-3743cff62a79" containerName="registry-server" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.450819 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5582dbe8-0a07-4c5f-9054-5e0bc32c2819" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.451592 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.454183 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.454421 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.454934 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.460544 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.461564 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-62bf2" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.497759 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq"] Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.556760 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.556857 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.557747 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.557844 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw9s4\" (UniqueName: \"kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.557875 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.660265 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.660404 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.660497 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.660585 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw9s4\" (UniqueName: \"kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.660622 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.664585 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.664798 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.665154 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.665665 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.688581 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw9s4\" (UniqueName: \"kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4\") pod \"logging-edpm-deployment-openstack-edpm-ipam-bg8bq\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:39 crc kubenswrapper[4903]: I1126 23:14:39.785135 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:14:40 crc kubenswrapper[4903]: I1126 23:14:40.377459 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq"] Nov 26 23:14:40 crc kubenswrapper[4903]: W1126 23:14:40.380827 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc177b1fa_8c5c_43f2_bb1d_c1695ccf0050.slice/crio-6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6 WatchSource:0}: Error finding container 6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6: Status 404 returned error can't find the container with id 6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6 Nov 26 23:14:41 crc kubenswrapper[4903]: I1126 23:14:41.402819 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" event={"ID":"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050","Type":"ContainerStarted","Data":"6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6"} Nov 26 23:14:42 crc kubenswrapper[4903]: I1126 23:14:42.414523 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" event={"ID":"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050","Type":"ContainerStarted","Data":"31cb45ba599ec2bda87118c162191ab3d1b9c208187fa2425e79f77d27cac6fa"} Nov 26 23:14:42 crc kubenswrapper[4903]: I1126 23:14:42.438712 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" podStartSLOduration=2.482090796 podStartE2EDuration="3.438666123s" podCreationTimestamp="2025-11-26 23:14:39 +0000 UTC" firstStartedPulling="2025-11-26 23:14:40.384475607 +0000 UTC m=+3209.074710517" lastFinishedPulling="2025-11-26 23:14:41.341050904 +0000 UTC m=+3210.031285844" observedRunningTime="2025-11-26 23:14:42.429494598 +0000 UTC m=+3211.119729528" watchObservedRunningTime="2025-11-26 23:14:42.438666123 +0000 UTC m=+3211.128901043" Nov 26 23:14:50 crc kubenswrapper[4903]: I1126 23:14:50.029189 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:14:50 crc kubenswrapper[4903]: E1126 23:14:50.030276 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:14:58 crc kubenswrapper[4903]: I1126 23:14:58.667010 4903 generic.go:334] "Generic (PLEG): container finished" podID="c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" containerID="31cb45ba599ec2bda87118c162191ab3d1b9c208187fa2425e79f77d27cac6fa" exitCode=0 Nov 26 23:14:58 crc kubenswrapper[4903]: I1126 23:14:58.667126 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" event={"ID":"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050","Type":"ContainerDied","Data":"31cb45ba599ec2bda87118c162191ab3d1b9c208187fa2425e79f77d27cac6fa"} Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.147969 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8"] Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.149808 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.151845 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.152028 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.170686 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8"] Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.180260 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0\") pod \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243304 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory\") pod \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243366 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key\") pod \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243551 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw9s4\" (UniqueName: \"kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4\") pod \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243580 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1\") pod \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\" (UID: \"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050\") " Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.243984 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.244528 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.244567 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njzts\" (UniqueName: \"kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.254743 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4" (OuterVolumeSpecName: "kube-api-access-jw9s4") pod "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" (UID: 
"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050"). InnerVolumeSpecName "kube-api-access-jw9s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.276950 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" (UID: "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.277423 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory" (OuterVolumeSpecName: "inventory") pod "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" (UID: "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.281994 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" (UID: "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.296094 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" (UID: "c177b1fa-8c5c-43f2-bb1d-c1695ccf0050"). InnerVolumeSpecName "logging-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.346102 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.346600 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.346703 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njzts\" (UniqueName: \"kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.346863 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw9s4\" (UniqueName: \"kubernetes.io/projected/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-kube-api-access-jw9s4\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.346945 4903 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.347203 4903 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.347287 4903 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.347409 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c177b1fa-8c5c-43f2-bb1d-c1695ccf0050-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.347436 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.349297 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc 
kubenswrapper[4903]: I1126 23:15:00.361309 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njzts\" (UniqueName: \"kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts\") pod \"collect-profiles-29403315-fdxf8\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.494891 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.698391 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" event={"ID":"c177b1fa-8c5c-43f2-bb1d-c1695ccf0050","Type":"ContainerDied","Data":"6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6"} Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.698679 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6935e03a1fcbd921eeed0287551dcaab8a45a1fd675172af5dccc2d7199250e6" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.698428 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-bg8bq" Nov 26 23:15:00 crc kubenswrapper[4903]: I1126 23:15:00.950235 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8"] Nov 26 23:15:01 crc kubenswrapper[4903]: I1126 23:15:01.712939 4903 generic.go:334] "Generic (PLEG): container finished" podID="0abe270e-982f-47b0-9635-b267b2095aa4" containerID="86714bef74405f7fc7cb3807df2ff2a67d87861904205d3f136d82d52d46e74f" exitCode=0 Nov 26 23:15:01 crc kubenswrapper[4903]: I1126 23:15:01.713012 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" event={"ID":"0abe270e-982f-47b0-9635-b267b2095aa4","Type":"ContainerDied","Data":"86714bef74405f7fc7cb3807df2ff2a67d87861904205d3f136d82d52d46e74f"} Nov 26 23:15:01 crc kubenswrapper[4903]: I1126 23:15:01.713886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" event={"ID":"0abe270e-982f-47b0-9635-b267b2095aa4","Type":"ContainerStarted","Data":"64edbd72b2cac11f2dabc338f7386cfc70c355d04e202bd628be50cb1c2d1c55"} Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.029528 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:15:03 crc kubenswrapper[4903]: E1126 23:15:03.030194 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.111468 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.216587 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume\") pod \"0abe270e-982f-47b0-9635-b267b2095aa4\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.216742 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume\") pod \"0abe270e-982f-47b0-9635-b267b2095aa4\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.216766 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njzts\" (UniqueName: \"kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts\") pod \"0abe270e-982f-47b0-9635-b267b2095aa4\" (UID: \"0abe270e-982f-47b0-9635-b267b2095aa4\") " Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.217352 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume" (OuterVolumeSpecName: "config-volume") pod "0abe270e-982f-47b0-9635-b267b2095aa4" (UID: "0abe270e-982f-47b0-9635-b267b2095aa4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.222370 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts" (OuterVolumeSpecName: "kube-api-access-njzts") pod "0abe270e-982f-47b0-9635-b267b2095aa4" (UID: "0abe270e-982f-47b0-9635-b267b2095aa4"). InnerVolumeSpecName "kube-api-access-njzts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.222724 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0abe270e-982f-47b0-9635-b267b2095aa4" (UID: "0abe270e-982f-47b0-9635-b267b2095aa4"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.322253 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0abe270e-982f-47b0-9635-b267b2095aa4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.322290 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0abe270e-982f-47b0-9635-b267b2095aa4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.322301 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njzts\" (UniqueName: \"kubernetes.io/projected/0abe270e-982f-47b0-9635-b267b2095aa4-kube-api-access-njzts\") on node \"crc\" DevicePath \"\"" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.737655 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" event={"ID":"0abe270e-982f-47b0-9635-b267b2095aa4","Type":"ContainerDied","Data":"64edbd72b2cac11f2dabc338f7386cfc70c355d04e202bd628be50cb1c2d1c55"} Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.737757 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64edbd72b2cac11f2dabc338f7386cfc70c355d04e202bd628be50cb1c2d1c55" Nov 26 23:15:03 crc kubenswrapper[4903]: I1126 23:15:03.737711 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8" Nov 26 23:15:04 crc kubenswrapper[4903]: I1126 23:15:04.210483 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k"] Nov 26 23:15:04 crc kubenswrapper[4903]: I1126 23:15:04.224413 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403270-2527k"] Nov 26 23:15:06 crc kubenswrapper[4903]: I1126 23:15:06.048670 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77e6fff5-e9a3-46c5-98b8-b0085f5de807" path="/var/lib/kubelet/pods/77e6fff5-e9a3-46c5-98b8-b0085f5de807/volumes" Nov 26 23:15:17 crc kubenswrapper[4903]: I1126 23:15:17.028767 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:15:17 crc kubenswrapper[4903]: E1126 23:15:17.029476 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:15:22 crc kubenswrapper[4903]: I1126 23:15:22.385173 4903 scope.go:117] "RemoveContainer" containerID="613653e85df9f97e0f794a2d98e8f23047b3f69ac4a2fac47d49382033d1875b" Nov 26 23:15:29 crc kubenswrapper[4903]: I1126 23:15:29.028992 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:15:29 crc kubenswrapper[4903]: E1126 23:15:29.030502 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:15:44 crc kubenswrapper[4903]: I1126 23:15:44.029133 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:15:44 crc kubenswrapper[4903]: E1126 23:15:44.030388 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:15:55 crc kubenswrapper[4903]: I1126 23:15:55.029578 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:15:55 crc kubenswrapper[4903]: E1126 23:15:55.030327 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:16:06 crc kubenswrapper[4903]: I1126 23:16:06.028850 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:16:06 crc kubenswrapper[4903]: E1126 23:16:06.029651 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:16:18 crc kubenswrapper[4903]: I1126 23:16:18.029357 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:16:18 crc kubenswrapper[4903]: E1126 23:16:18.030740 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:16:29 crc kubenswrapper[4903]: I1126 23:16:29.028750 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:16:29 crc kubenswrapper[4903]: E1126 23:16:29.029504 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:16:40 crc kubenswrapper[4903]: I1126 23:16:40.028950 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:16:40 crc kubenswrapper[4903]: E1126 23:16:40.030267 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:16:51 crc kubenswrapper[4903]: I1126 23:16:51.029107 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:16:51 crc kubenswrapper[4903]: E1126 23:16:51.030186 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:17:02 crc kubenswrapper[4903]: I1126 23:17:02.036156 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:17:02 crc kubenswrapper[4903]: E1126 23:17:02.036833 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:17:16 crc kubenswrapper[4903]: I1126 23:17:16.029729 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:17:16 crc kubenswrapper[4903]: E1126 23:17:16.030835 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:17:31 crc kubenswrapper[4903]: I1126 23:17:31.029620 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:17:31 crc kubenswrapper[4903]: E1126 23:17:31.031212 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:17:43 crc kubenswrapper[4903]: I1126 23:17:43.029174 4903 
scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:17:43 crc kubenswrapper[4903]: E1126 23:17:43.030346 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:17:54 crc kubenswrapper[4903]: I1126 23:17:54.028403 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:17:54 crc kubenswrapper[4903]: E1126 23:17:54.029252 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:18:06 crc kubenswrapper[4903]: I1126 23:18:06.029058 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:18:06 crc kubenswrapper[4903]: E1126 23:18:06.030450 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.314724 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:15 crc kubenswrapper[4903]: E1126 23:18:15.316668 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.316687 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 26 23:18:15 crc kubenswrapper[4903]: E1126 23:18:15.316751 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abe270e-982f-47b0-9635-b267b2095aa4" containerName="collect-profiles" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.316765 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abe270e-982f-47b0-9635-b267b2095aa4" containerName="collect-profiles" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.317433 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c177b1fa-8c5c-43f2-bb1d-c1695ccf0050" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.317491 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="0abe270e-982f-47b0-9635-b267b2095aa4" containerName="collect-profiles" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.322575 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.360110 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.483181 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.483239 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpslf\" (UniqueName: \"kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.483278 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.585562 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.585608 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpslf\" (UniqueName: \"kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.585634 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.586251 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.586375 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.604014 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-kpslf\" (UniqueName: \"kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf\") pod \"redhat-operators-4s426\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:15 crc kubenswrapper[4903]: I1126 23:18:15.654792 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:16 crc kubenswrapper[4903]: I1126 23:18:16.143346 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:16 crc kubenswrapper[4903]: I1126 23:18:16.377935 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerID="897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd" exitCode=0 Nov 26 23:18:16 crc kubenswrapper[4903]: I1126 23:18:16.377973 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerDied","Data":"897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd"} Nov 26 23:18:16 crc kubenswrapper[4903]: I1126 23:18:16.377996 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerStarted","Data":"fff6772d2955307850c79cd65665910e4255637b4a8b3cb28cf47247e5a6762e"} Nov 26 23:18:16 crc kubenswrapper[4903]: I1126 23:18:16.383776 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:18:18 crc kubenswrapper[4903]: I1126 23:18:18.411034 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerStarted","Data":"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554"} Nov 26 23:18:20 crc kubenswrapper[4903]: I1126 23:18:20.440663 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerID="27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554" exitCode=0 Nov 26 23:18:20 crc kubenswrapper[4903]: I1126 23:18:20.440801 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerDied","Data":"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554"} Nov 26 23:18:21 crc kubenswrapper[4903]: I1126 23:18:21.029246 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:18:21 crc kubenswrapper[4903]: E1126 23:18:21.029977 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:18:21 crc kubenswrapper[4903]: I1126 23:18:21.457628 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" 
event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerStarted","Data":"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4"} Nov 26 23:18:21 crc kubenswrapper[4903]: I1126 23:18:21.506485 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4s426" podStartSLOduration=1.884718605 podStartE2EDuration="6.506466386s" podCreationTimestamp="2025-11-26 23:18:15 +0000 UTC" firstStartedPulling="2025-11-26 23:18:16.383453506 +0000 UTC m=+3425.073688416" lastFinishedPulling="2025-11-26 23:18:21.005201287 +0000 UTC m=+3429.695436197" observedRunningTime="2025-11-26 23:18:21.495902335 +0000 UTC m=+3430.186137255" watchObservedRunningTime="2025-11-26 23:18:21.506466386 +0000 UTC m=+3430.196701286" Nov 26 23:18:25 crc kubenswrapper[4903]: I1126 23:18:25.655646 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:25 crc kubenswrapper[4903]: I1126 23:18:25.656462 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:26 crc kubenswrapper[4903]: I1126 23:18:26.736398 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4s426" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="registry-server" probeResult="failure" output=< Nov 26 23:18:26 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:18:26 crc kubenswrapper[4903]: > Nov 26 23:18:34 crc kubenswrapper[4903]: I1126 23:18:34.029835 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9" Nov 26 23:18:34 crc kubenswrapper[4903]: I1126 23:18:34.649474 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c"} Nov 26 23:18:35 crc kubenswrapper[4903]: I1126 23:18:35.751031 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:35 crc kubenswrapper[4903]: I1126 23:18:35.848578 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:36 crc kubenswrapper[4903]: I1126 23:18:36.008629 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:37 crc kubenswrapper[4903]: I1126 23:18:37.692045 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4s426" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="registry-server" containerID="cri-o://c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4" gracePeriod=2 Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.279818 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.408735 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities\") pod \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.409151 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content\") pod \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.409277 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpslf\" (UniqueName: \"kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf\") pod \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\" (UID: \"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228\") " Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.410847 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities" (OuterVolumeSpecName: "utilities") pod "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" (UID: "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.419448 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf" (OuterVolumeSpecName: "kube-api-access-kpslf") pod "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" (UID: "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228"). InnerVolumeSpecName "kube-api-access-kpslf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.507604 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" (UID: "1c0e9ea5-8d5e-4b36-88ce-efbb5340b228"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.512274 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.512307 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpslf\" (UniqueName: \"kubernetes.io/projected/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-kube-api-access-kpslf\") on node \"crc\" DevicePath \"\"" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.512320 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.710017 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerID="c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4" exitCode=0 Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.710068 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerDied","Data":"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4"} Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.710101 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4s426" event={"ID":"1c0e9ea5-8d5e-4b36-88ce-efbb5340b228","Type":"ContainerDied","Data":"fff6772d2955307850c79cd65665910e4255637b4a8b3cb28cf47247e5a6762e"} Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.710122 4903 scope.go:117] "RemoveContainer" containerID="c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.710220 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4s426" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.767321 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.773582 4903 scope.go:117] "RemoveContainer" containerID="27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554" Nov 26 23:18:38 crc kubenswrapper[4903]: I1126 23:18:38.781312 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4s426"] Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.656790 4903 scope.go:117] "RemoveContainer" containerID="897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.705862 4903 scope.go:117] "RemoveContainer" containerID="c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4" Nov 26 23:18:39 crc kubenswrapper[4903]: E1126 23:18:39.708108 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4\": container with ID starting with c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4 not found: ID does not exist" containerID="c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.708153 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4"} err="failed to get container status \"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4\": rpc error: code = NotFound desc = could not find container \"c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4\": container with ID starting with c1e4e706f0358e245c8d69e012f15ab68b2f34a55c46f291411a9b66b9a062e4 not found: ID does not exist" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.708182 4903 scope.go:117] "RemoveContainer" containerID="27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554" Nov 26 23:18:39 crc kubenswrapper[4903]: E1126 23:18:39.709074 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554\": container with ID starting with 27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554 not found: ID does not exist" containerID="27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.709100 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554"} err="failed to get container status \"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554\": rpc error: code = NotFound desc = could not find container \"27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554\": container with ID starting with 27282d09bafd3cfd669bba2d2da12302719603b5d8513941f952c10693075554 not found: ID does not exist" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.709116 4903 scope.go:117] "RemoveContainer" containerID="897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd" Nov 26 23:18:39 crc kubenswrapper[4903]: E1126 23:18:39.709396 4903 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd\": container with ID starting with 897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd not found: ID does not exist" containerID="897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd" Nov 26 23:18:39 crc kubenswrapper[4903]: I1126 23:18:39.709436 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd"} err="failed to get container status \"897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd\": rpc error: code = NotFound desc = could not find container \"897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd\": container with ID starting with 897dafe1d0946b110c9a670f92c2ad192ba5161e3bfc58ecd7f7644d15cca9bd not found: ID does not exist" Nov 26 23:18:40 crc kubenswrapper[4903]: I1126 23:18:40.046829 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" path="/var/lib/kubelet/pods/1c0e9ea5-8d5e-4b36-88ce-efbb5340b228/volumes" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.106781 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:16 crc kubenswrapper[4903]: E1126 23:19:16.108128 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="extract-utilities" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.108151 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="extract-utilities" Nov 26 23:19:16 crc kubenswrapper[4903]: E1126 23:19:16.108184 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="registry-server" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.108197 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="registry-server" Nov 26 23:19:16 crc kubenswrapper[4903]: E1126 23:19:16.108236 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="extract-content" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.108249 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="extract-content" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.108685 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c0e9ea5-8d5e-4b36-88ce-efbb5340b228" containerName="registry-server" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.113329 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.129932 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.257442 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.257896 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9vwg\" (UniqueName: \"kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.257940 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.361120 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.361598 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.361795 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9vwg\" (UniqueName: \"kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.361830 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.362088 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.380738 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-q9vwg\" (UniqueName: \"kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg\") pod \"community-operators-hdlpl\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.438282 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:16 crc kubenswrapper[4903]: I1126 23:19:16.991393 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:17 crc kubenswrapper[4903]: I1126 23:19:17.258723 4903 generic.go:334] "Generic (PLEG): container finished" podID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerID="aaa6d3e4956913218b62679ae156477bd1b1b93fca97ec5480a564e8c6bb01b1" exitCode=0 Nov 26 23:19:17 crc kubenswrapper[4903]: I1126 23:19:17.258770 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerDied","Data":"aaa6d3e4956913218b62679ae156477bd1b1b93fca97ec5480a564e8c6bb01b1"} Nov 26 23:19:17 crc kubenswrapper[4903]: I1126 23:19:17.258811 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerStarted","Data":"3fc89c896ae1b959bab741ba075efe73acbb687b20df16fcc2917bfe53ec77b2"} Nov 26 23:19:18 crc kubenswrapper[4903]: I1126 23:19:18.274516 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerStarted","Data":"bf0d999b2394cab7ea0f588075ea39635e02647640c73d376e17ee12ca51c381"} Nov 26 23:19:19 crc kubenswrapper[4903]: I1126 23:19:19.292761 4903 generic.go:334] "Generic (PLEG): container finished" podID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerID="bf0d999b2394cab7ea0f588075ea39635e02647640c73d376e17ee12ca51c381" exitCode=0 Nov 26 23:19:19 crc kubenswrapper[4903]: I1126 23:19:19.292813 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerDied","Data":"bf0d999b2394cab7ea0f588075ea39635e02647640c73d376e17ee12ca51c381"} Nov 26 23:19:20 crc kubenswrapper[4903]: I1126 23:19:20.307895 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerStarted","Data":"99d5b4c65046bb0c1d58163e3e4a5481cb9ffba18413ec23c3f5ac7dc1f33b52"} Nov 26 23:19:20 crc kubenswrapper[4903]: I1126 23:19:20.339347 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hdlpl" podStartSLOduration=1.6281939520000002 podStartE2EDuration="4.339325389s" podCreationTimestamp="2025-11-26 23:19:16 +0000 UTC" firstStartedPulling="2025-11-26 23:19:17.262185371 +0000 UTC m=+3485.952420281" lastFinishedPulling="2025-11-26 23:19:19.973316808 +0000 UTC m=+3488.663551718" observedRunningTime="2025-11-26 23:19:20.329257798 +0000 UTC m=+3489.019492718" watchObservedRunningTime="2025-11-26 23:19:20.339325389 +0000 UTC m=+3489.029560309" Nov 26 23:19:26 crc kubenswrapper[4903]: I1126 23:19:26.438728 4903 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:26 crc kubenswrapper[4903]: I1126 23:19:26.439038 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:26 crc kubenswrapper[4903]: I1126 23:19:26.524933 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:27 crc kubenswrapper[4903]: I1126 23:19:27.475638 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:27 crc kubenswrapper[4903]: I1126 23:19:27.555013 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:29 crc kubenswrapper[4903]: I1126 23:19:29.439995 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hdlpl" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="registry-server" containerID="cri-o://99d5b4c65046bb0c1d58163e3e4a5481cb9ffba18413ec23c3f5ac7dc1f33b52" gracePeriod=2 Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.452683 4903 generic.go:334] "Generic (PLEG): container finished" podID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerID="99d5b4c65046bb0c1d58163e3e4a5481cb9ffba18413ec23c3f5ac7dc1f33b52" exitCode=0 Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.452737 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerDied","Data":"99d5b4c65046bb0c1d58163e3e4a5481cb9ffba18413ec23c3f5ac7dc1f33b52"} Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.452964 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hdlpl" event={"ID":"09d8322b-deff-46eb-9c12-b79df04d29f8","Type":"ContainerDied","Data":"3fc89c896ae1b959bab741ba075efe73acbb687b20df16fcc2917bfe53ec77b2"} Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.452982 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fc89c896ae1b959bab741ba075efe73acbb687b20df16fcc2917bfe53ec77b2" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.524669 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.687432 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9vwg\" (UniqueName: \"kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg\") pod \"09d8322b-deff-46eb-9c12-b79df04d29f8\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.687502 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content\") pod \"09d8322b-deff-46eb-9c12-b79df04d29f8\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.687548 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities\") pod \"09d8322b-deff-46eb-9c12-b79df04d29f8\" (UID: \"09d8322b-deff-46eb-9c12-b79df04d29f8\") " Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.689326 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities" (OuterVolumeSpecName: "utilities") pod "09d8322b-deff-46eb-9c12-b79df04d29f8" (UID: "09d8322b-deff-46eb-9c12-b79df04d29f8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.689854 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.701536 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg" (OuterVolumeSpecName: "kube-api-access-q9vwg") pod "09d8322b-deff-46eb-9c12-b79df04d29f8" (UID: "09d8322b-deff-46eb-9c12-b79df04d29f8"). InnerVolumeSpecName "kube-api-access-q9vwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.756582 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09d8322b-deff-46eb-9c12-b79df04d29f8" (UID: "09d8322b-deff-46eb-9c12-b79df04d29f8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.792364 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9vwg\" (UniqueName: \"kubernetes.io/projected/09d8322b-deff-46eb-9c12-b79df04d29f8-kube-api-access-q9vwg\") on node \"crc\" DevicePath \"\"" Nov 26 23:19:30 crc kubenswrapper[4903]: I1126 23:19:30.792390 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d8322b-deff-46eb-9c12-b79df04d29f8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:19:31 crc kubenswrapper[4903]: I1126 23:19:31.465828 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hdlpl" Nov 26 23:19:31 crc kubenswrapper[4903]: I1126 23:19:31.524332 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:31 crc kubenswrapper[4903]: I1126 23:19:31.539028 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hdlpl"] Nov 26 23:19:32 crc kubenswrapper[4903]: I1126 23:19:32.039126 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" path="/var/lib/kubelet/pods/09d8322b-deff-46eb-9c12-b79df04d29f8/volumes" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.344884 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:19:59 crc kubenswrapper[4903]: E1126 23:19:59.346327 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="extract-utilities" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.346354 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="extract-utilities" Nov 26 23:19:59 crc kubenswrapper[4903]: E1126 23:19:59.346405 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="extract-content" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.346422 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="extract-content" Nov 26 23:19:59 crc kubenswrapper[4903]: E1126 23:19:59.346466 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="registry-server" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.346482 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="registry-server" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.347313 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="09d8322b-deff-46eb-9c12-b79df04d29f8" containerName="registry-server" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.350807 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.355656 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.445235 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.445371 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.445418 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqp57\" (UniqueName: \"kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.548194 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.548279 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.548315 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqp57\" (UniqueName: \"kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.548953 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.548992 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.573817 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mqp57\" (UniqueName: \"kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57\") pod \"redhat-marketplace-cjrmt\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:19:59 crc kubenswrapper[4903]: I1126 23:19:59.685948 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:00 crc kubenswrapper[4903]: I1126 23:20:00.142254 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:20:00 crc kubenswrapper[4903]: W1126 23:20:00.148370 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fe5aa0a_37f5_44a8_961b_ea84ba342be4.slice/crio-6faccdcc133b09932f94af496b97193e2bf87efd92f4f2a0f333389d528e4583 WatchSource:0}: Error finding container 6faccdcc133b09932f94af496b97193e2bf87efd92f4f2a0f333389d528e4583: Status 404 returned error can't find the container with id 6faccdcc133b09932f94af496b97193e2bf87efd92f4f2a0f333389d528e4583 Nov 26 23:20:00 crc kubenswrapper[4903]: I1126 23:20:00.936889 4903 generic.go:334] "Generic (PLEG): container finished" podID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerID="5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598" exitCode=0 Nov 26 23:20:00 crc kubenswrapper[4903]: I1126 23:20:00.936961 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerDied","Data":"5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598"} Nov 26 23:20:00 crc kubenswrapper[4903]: I1126 23:20:00.937201 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerStarted","Data":"6faccdcc133b09932f94af496b97193e2bf87efd92f4f2a0f333389d528e4583"} Nov 26 23:20:02 crc kubenswrapper[4903]: I1126 23:20:02.967656 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerStarted","Data":"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068"} Nov 26 23:20:03 crc kubenswrapper[4903]: I1126 23:20:03.982289 4903 generic.go:334] "Generic (PLEG): container finished" podID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerID="8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068" exitCode=0 Nov 26 23:20:03 crc kubenswrapper[4903]: I1126 23:20:03.982340 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerDied","Data":"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068"} Nov 26 23:20:06 crc kubenswrapper[4903]: I1126 23:20:06.013438 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerStarted","Data":"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383"} Nov 26 23:20:06 crc kubenswrapper[4903]: I1126 23:20:06.060713 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cjrmt" podStartSLOduration=2.280083281 
podStartE2EDuration="7.060662013s" podCreationTimestamp="2025-11-26 23:19:59 +0000 UTC" firstStartedPulling="2025-11-26 23:20:00.939609215 +0000 UTC m=+3529.629844115" lastFinishedPulling="2025-11-26 23:20:05.720187897 +0000 UTC m=+3534.410422847" observedRunningTime="2025-11-26 23:20:06.048282672 +0000 UTC m=+3534.738517592" watchObservedRunningTime="2025-11-26 23:20:06.060662013 +0000 UTC m=+3534.750896953" Nov 26 23:20:09 crc kubenswrapper[4903]: I1126 23:20:09.686472 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:09 crc kubenswrapper[4903]: I1126 23:20:09.687138 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:09 crc kubenswrapper[4903]: I1126 23:20:09.769057 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:10 crc kubenswrapper[4903]: I1126 23:20:10.135980 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:10 crc kubenswrapper[4903]: I1126 23:20:10.199906 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.106297 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cjrmt" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="registry-server" containerID="cri-o://4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383" gracePeriod=2 Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.651738 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.824184 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content\") pod \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.824444 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqp57\" (UniqueName: \"kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57\") pod \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.824543 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities\") pod \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\" (UID: \"9fe5aa0a-37f5-44a8-961b-ea84ba342be4\") " Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.826046 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities" (OuterVolumeSpecName: "utilities") pod "9fe5aa0a-37f5-44a8-961b-ea84ba342be4" (UID: "9fe5aa0a-37f5-44a8-961b-ea84ba342be4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.833653 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57" (OuterVolumeSpecName: "kube-api-access-mqp57") pod "9fe5aa0a-37f5-44a8-961b-ea84ba342be4" (UID: "9fe5aa0a-37f5-44a8-961b-ea84ba342be4"). InnerVolumeSpecName "kube-api-access-mqp57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.851615 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9fe5aa0a-37f5-44a8-961b-ea84ba342be4" (UID: "9fe5aa0a-37f5-44a8-961b-ea84ba342be4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.927743 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.927797 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqp57\" (UniqueName: \"kubernetes.io/projected/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-kube-api-access-mqp57\") on node \"crc\" DevicePath \"\"" Nov 26 23:20:12 crc kubenswrapper[4903]: I1126 23:20:12.927819 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe5aa0a-37f5-44a8-961b-ea84ba342be4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.127889 4903 generic.go:334] "Generic (PLEG): container finished" podID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerID="4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383" exitCode=0 Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.127957 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerDied","Data":"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383"} Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.128009 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cjrmt" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.128048 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cjrmt" event={"ID":"9fe5aa0a-37f5-44a8-961b-ea84ba342be4","Type":"ContainerDied","Data":"6faccdcc133b09932f94af496b97193e2bf87efd92f4f2a0f333389d528e4583"} Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.128091 4903 scope.go:117] "RemoveContainer" containerID="4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.173661 4903 scope.go:117] "RemoveContainer" containerID="8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.200392 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.221483 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cjrmt"] Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.233014 4903 scope.go:117] "RemoveContainer" containerID="5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.286765 4903 scope.go:117] "RemoveContainer" containerID="4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383" Nov 26 23:20:13 crc kubenswrapper[4903]: E1126 23:20:13.287379 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383\": container with ID starting with 4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383 not found: ID does not exist" containerID="4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.287438 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383"} err="failed to get container status \"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383\": rpc error: code = NotFound desc = could not find container \"4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383\": container with ID starting with 4604d00c4d91644d619e5c3ba2660dccd0ab88160bf72ad8cb4583cd3524e383 not found: ID does not exist" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.287516 4903 scope.go:117] "RemoveContainer" containerID="8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068" Nov 26 23:20:13 crc kubenswrapper[4903]: E1126 23:20:13.288271 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068\": container with ID starting with 8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068 not found: ID does not exist" containerID="8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.288977 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068"} err="failed to get container status \"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068\": rpc error: code = NotFound desc = could not find 
container \"8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068\": container with ID starting with 8cf62f3b1358b71fe0f815a9ffdc406714e8c636a962c9638d9b43c5f54d1068 not found: ID does not exist" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.289052 4903 scope.go:117] "RemoveContainer" containerID="5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598" Nov 26 23:20:13 crc kubenswrapper[4903]: E1126 23:20:13.289470 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598\": container with ID starting with 5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598 not found: ID does not exist" containerID="5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598" Nov 26 23:20:13 crc kubenswrapper[4903]: I1126 23:20:13.289534 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598"} err="failed to get container status \"5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598\": rpc error: code = NotFound desc = could not find container \"5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598\": container with ID starting with 5065cf2e797660c0066123a61b55d0eef3bb493ae2d2799089fa89fa1663a598 not found: ID does not exist" Nov 26 23:20:14 crc kubenswrapper[4903]: I1126 23:20:14.043262 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" path="/var/lib/kubelet/pods/9fe5aa0a-37f5-44a8-961b-ea84ba342be4/volumes" Nov 26 23:20:33 crc kubenswrapper[4903]: E1126 23:20:33.984351 4903 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.219:44648->38.102.83.219:36831: write tcp 38.102.83.219:44648->38.102.83.219:36831: write: broken pipe Nov 26 23:21:01 crc kubenswrapper[4903]: I1126 23:21:01.983379 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:21:01 crc kubenswrapper[4903]: I1126 23:21:01.984372 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:21:31 crc kubenswrapper[4903]: I1126 23:21:31.981646 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:21:31 crc kubenswrapper[4903]: I1126 23:21:31.982304 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.434336 4903 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/certified-operators-npsdr"] Nov 26 23:21:59 crc kubenswrapper[4903]: E1126 23:21:59.435396 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="extract-content" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.435412 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="extract-content" Nov 26 23:21:59 crc kubenswrapper[4903]: E1126 23:21:59.435449 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="extract-utilities" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.435458 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="extract-utilities" Nov 26 23:21:59 crc kubenswrapper[4903]: E1126 23:21:59.435486 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="registry-server" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.435495 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="registry-server" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.435807 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fe5aa0a-37f5-44a8-961b-ea84ba342be4" containerName="registry-server" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.437891 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.447181 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-npsdr"] Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.599704 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdg89\" (UniqueName: \"kubernetes.io/projected/33cd040b-fe08-424c-a1af-62df1ed45ad4-kube-api-access-jdg89\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.599927 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-utilities\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.600237 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-catalog-content\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.702154 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdg89\" (UniqueName: \"kubernetes.io/projected/33cd040b-fe08-424c-a1af-62df1ed45ad4-kube-api-access-jdg89\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.702327 
4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-utilities\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.703076 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-utilities\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.703299 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-catalog-content\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.703548 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33cd040b-fe08-424c-a1af-62df1ed45ad4-catalog-content\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.736295 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdg89\" (UniqueName: \"kubernetes.io/projected/33cd040b-fe08-424c-a1af-62df1ed45ad4-kube-api-access-jdg89\") pod \"certified-operators-npsdr\" (UID: \"33cd040b-fe08-424c-a1af-62df1ed45ad4\") " pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:21:59 crc kubenswrapper[4903]: I1126 23:21:59.760017 4903 util.go:30] "No sandbox for pod can be found. 
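
The kube-api-access-* volumes being mounted above are projected service-account volumes; inside the container they surface as token, ca.crt, and namespace files under a well-known path. A small sketch that reads them (it only finds the files when run inside a pod):

package main

import (
	"fmt"
	"os"
)

// Projected kube-api-access volumes are mounted at this path in the container.
const saDir = "/var/run/secrets/kubernetes.io/serviceaccount"

func main() {
	for _, f := range []string{"token", "ca.crt", "namespace"} {
		b, err := os.ReadFile(saDir + "/" + f)
		if err != nil {
			fmt.Println(f, "not available:", err) // e.g. when run outside a pod
			continue
		}
		fmt.Printf("%s: %d bytes\n", f, len(b))
	}
}
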
Need to start a new one" pod="openshift-marketplace/certified-operators-npsdr" Nov 26 23:22:00 crc kubenswrapper[4903]: W1126 23:22:00.324988 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33cd040b_fe08_424c_a1af_62df1ed45ad4.slice/crio-6c29aabf54304ec7314a8b4941bbe83dba259d12eaa5d37d609cadfb442f0412 WatchSource:0}: Error finding container 6c29aabf54304ec7314a8b4941bbe83dba259d12eaa5d37d609cadfb442f0412: Status 404 returned error can't find the container with id 6c29aabf54304ec7314a8b4941bbe83dba259d12eaa5d37d609cadfb442f0412 Nov 26 23:22:00 crc kubenswrapper[4903]: I1126 23:22:00.333593 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-npsdr"] Nov 26 23:22:00 crc kubenswrapper[4903]: I1126 23:22:00.522200 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-npsdr" event={"ID":"33cd040b-fe08-424c-a1af-62df1ed45ad4","Type":"ContainerStarted","Data":"6c29aabf54304ec7314a8b4941bbe83dba259d12eaa5d37d609cadfb442f0412"} Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.543511 4903 generic.go:334] "Generic (PLEG): container finished" podID="33cd040b-fe08-424c-a1af-62df1ed45ad4" containerID="c7f24b42718656d2cdfc8db8ea5dbf7007816bb97b1982d10b9b7206e1709232" exitCode=0 Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.543587 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-npsdr" event={"ID":"33cd040b-fe08-424c-a1af-62df1ed45ad4","Type":"ContainerDied","Data":"c7f24b42718656d2cdfc8db8ea5dbf7007816bb97b1982d10b9b7206e1709232"} Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.981412 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.981835 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.981903 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.983167 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:22:01 crc kubenswrapper[4903]: I1126 23:22:01.983257 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c" gracePeriod=600 Nov 26 23:22:02 crc kubenswrapper[4903]: I1126 23:22:02.559432 4903 
Nov 26 23:22:02 crc kubenswrapper[4903]: I1126 23:22:02.559432 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c" exitCode=0
Nov 26 23:22:02 crc kubenswrapper[4903]: I1126 23:22:02.559804 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c"}
Nov 26 23:22:02 crc kubenswrapper[4903]: I1126 23:22:02.559979 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"}
Nov 26 23:22:02 crc kubenswrapper[4903]: I1126 23:22:02.560010 4903 scope.go:117] "RemoveContainer" containerID="07393a38f4fd2e9fc945ab2d54c098ef3202c02f4e371887d908a424bf5138c9"
Nov 26 23:22:10 crc kubenswrapper[4903]: I1126 23:22:10.709357 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-npsdr" event={"ID":"33cd040b-fe08-424c-a1af-62df1ed45ad4","Type":"ContainerStarted","Data":"c69cdce5d50d4f76f4d7f6c055789a1740cefa7462218ac663082a5d101d2c20"}
Nov 26 23:22:11 crc kubenswrapper[4903]: I1126 23:22:11.725731 4903 generic.go:334] "Generic (PLEG): container finished" podID="33cd040b-fe08-424c-a1af-62df1ed45ad4" containerID="c69cdce5d50d4f76f4d7f6c055789a1740cefa7462218ac663082a5d101d2c20" exitCode=0
Nov 26 23:22:11 crc kubenswrapper[4903]: I1126 23:22:11.725762 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-npsdr" event={"ID":"33cd040b-fe08-424c-a1af-62df1ed45ad4","Type":"ContainerDied","Data":"c69cdce5d50d4f76f4d7f6c055789a1740cefa7462218ac663082a5d101d2c20"}
Nov 26 23:22:13 crc kubenswrapper[4903]: I1126 23:22:13.754552 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-npsdr" event={"ID":"33cd040b-fe08-424c-a1af-62df1ed45ad4","Type":"ContainerStarted","Data":"a503642c1f04c7451bba3d2a27027dc7adb9d3e4012122282f27a7e7b7e0c63d"}
Nov 26 23:22:13 crc kubenswrapper[4903]: I1126 23:22:13.780551 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-npsdr" podStartSLOduration=3.861697965 podStartE2EDuration="14.780529852s" podCreationTimestamp="2025-11-26 23:21:59 +0000 UTC" firstStartedPulling="2025-11-26 23:22:01.548224106 +0000 UTC m=+3650.238459026" lastFinishedPulling="2025-11-26 23:22:12.467055963 +0000 UTC m=+3661.157290913" observedRunningTime="2025-11-26 23:22:13.777332127 +0000 UTC m=+3662.467567047" watchObservedRunningTime="2025-11-26 23:22:13.780529852 +0000 UTC m=+3662.470764772"
Nov 26 23:22:19 crc kubenswrapper[4903]: I1126 23:22:19.760338 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-npsdr"
Nov 26 23:22:19 crc kubenswrapper[4903]: I1126 23:22:19.760844 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-npsdr"
Nov 26 23:22:19 crc kubenswrapper[4903]: I1126 23:22:19.854682 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-npsdr"
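[Annotation] The two durations in the "Observed pod startup duration" entry above are consistent: podStartSLOduration is the end-to-end startup time minus the image-pull window (SLO startup latency excludes pull time). A quick check with the logged monotonic (m=+) offsets, floating-point rounding aside:

// Verifies the relationship between the logged durations.
package main

import "fmt"

func main() {
	e2e := 14.780529852                     // podStartE2EDuration, seconds
	pull := 3661.157290913 - 3650.238459026 // lastFinishedPulling - firstStartedPulling (m=+ offsets)
	fmt.Printf("SLO duration: %.9fs\n", e2e-pull) // prints 3.861697965s, matching podStartSLOduration
}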
Nov 26 23:22:19 crc kubenswrapper[4903]: I1126 23:22:19.922406 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-npsdr"
Nov 26 23:22:19 crc kubenswrapper[4903]: I1126 23:22:19.990228 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-npsdr"]
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.094640 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"]
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.094900 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qzmvm" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="registry-server" containerID="cri-o://94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7" gracePeriod=2
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.591000 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzmvm"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.769817 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content\") pod \"3a9c3908-6825-461f-894f-f187b429fece\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") "
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.770764 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities\") pod \"3a9c3908-6825-461f-894f-f187b429fece\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") "
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.770897 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfsv9\" (UniqueName: \"kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9\") pod \"3a9c3908-6825-461f-894f-f187b429fece\" (UID: \"3a9c3908-6825-461f-894f-f187b429fece\") "
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.771171 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities" (OuterVolumeSpecName: "utilities") pod "3a9c3908-6825-461f-894f-f187b429fece" (UID: "3a9c3908-6825-461f-894f-f187b429fece"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.771577 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.777159 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9" (OuterVolumeSpecName: "kube-api-access-dfsv9") pod "3a9c3908-6825-461f-894f-f187b429fece" (UID: "3a9c3908-6825-461f-894f-f187b429fece"). InnerVolumeSpecName "kube-api-access-dfsv9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.808324 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a9c3908-6825-461f-894f-f187b429fece" (UID: "3a9c3908-6825-461f-894f-f187b429fece"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.854846 4903 generic.go:334] "Generic (PLEG): container finished" podID="3a9c3908-6825-461f-894f-f187b429fece" containerID="94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7" exitCode=0
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.854894 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qzmvm"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.854958 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerDied","Data":"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"}
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.855019 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qzmvm" event={"ID":"3a9c3908-6825-461f-894f-f187b429fece","Type":"ContainerDied","Data":"a326e3d960afb74a0a00472fac7036e34a2391ee053f6c863071f5c082e3f98a"}
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.855038 4903 scope.go:117] "RemoveContainer" containerID="94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.873401 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfsv9\" (UniqueName: \"kubernetes.io/projected/3a9c3908-6825-461f-894f-f187b429fece-kube-api-access-dfsv9\") on node \"crc\" DevicePath \"\""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.873427 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c3908-6825-461f-894f-f187b429fece-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.878142 4903 scope.go:117] "RemoveContainer" containerID="4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.892662 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"]
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.903574 4903 scope.go:117] "RemoveContainer" containerID="4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.906353 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qzmvm"]
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.957285 4903 scope.go:117] "RemoveContainer" containerID="94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"
Nov 26 23:22:20 crc kubenswrapper[4903]: E1126 23:22:20.957874 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7\": container with ID starting with 94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7 not found: ID does not exist" containerID="94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.957925 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7"} err="failed to get container status \"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7\": rpc error: code = NotFound desc = could not find container \"94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7\": container with ID starting with 94039a700b052d3aaf1096120b85b898ed1433dbe63ae1abb0a43568265312f7 not found: ID does not exist"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.957958 4903 scope.go:117] "RemoveContainer" containerID="4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"
Nov 26 23:22:20 crc kubenswrapper[4903]: E1126 23:22:20.958436 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf\": container with ID starting with 4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf not found: ID does not exist" containerID="4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.958483 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf"} err="failed to get container status \"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf\": rpc error: code = NotFound desc = could not find container \"4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf\": container with ID starting with 4c80e03eac8624a843ef94901ada3916cd8ab79b27e1165758b3f003e156b3cf not found: ID does not exist"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.958515 4903 scope.go:117] "RemoveContainer" containerID="4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15"
Nov 26 23:22:20 crc kubenswrapper[4903]: E1126 23:22:20.958849 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15\": container with ID starting with 4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15 not found: ID does not exist" containerID="4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15"
Nov 26 23:22:20 crc kubenswrapper[4903]: I1126 23:22:20.958877 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15"} err="failed to get container status \"4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15\": rpc error: code = NotFound desc = could not find container \"4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15\": container with ID starting with 4fb69257b865a27fe0786c7c785f72c9242eb862560eca51d4aa0c9a83122c15 not found: ID does not exist"
Nov 26 23:22:22 crc kubenswrapper[4903]: I1126 23:22:22.040862 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a9c3908-6825-461f-894f-f187b429fece" path="/var/lib/kubelet/pods/3a9c3908-6825-461f-894f-f187b429fece/volumes"
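[Annotation] The NotFound errors above are a benign race: by the time kubelet re-queries the runtime for the container's status, CRI-O has already removed it. The usual pattern for tolerating this is to swallow gRPC NotFound on deletion; a sketch with a hypothetical remove function, not kubelet's actual code:

// Idempotent container removal: NotFound means "already gone", so it is success.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func removeContainer(id string, remove func(string) error) error {
	if err := remove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already removed; treat as success
		}
		return fmt.Errorf("removing %s: %w", id, err)
	}
	return nil
}

func main() {
	gone := func(string) error { return status.Error(codes.NotFound, "could not find container") }
	fmt.Println(removeContainer("94039a70", gone)) // prints <nil>
}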
Nov 26 23:24:31 crc kubenswrapper[4903]: I1126 23:24:31.981174 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:24:31 crc kubenswrapper[4903]: I1126 23:24:31.984027 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:25:01 crc kubenswrapper[4903]: I1126 23:25:01.981454 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:25:01 crc kubenswrapper[4903]: I1126 23:25:01.982202 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:25:22 crc kubenswrapper[4903]: I1126 23:25:22.794848 4903 scope.go:117] "RemoveContainer" containerID="bf0d999b2394cab7ea0f588075ea39635e02647640c73d376e17ee12ca51c381"
Nov 26 23:25:22 crc kubenswrapper[4903]: I1126 23:25:22.835193 4903 scope.go:117] "RemoveContainer" containerID="99d5b4c65046bb0c1d58163e3e4a5481cb9ffba18413ec23c3f5ac7dc1f33b52"
Nov 26 23:25:22 crc kubenswrapper[4903]: I1126 23:25:22.891680 4903 scope.go:117] "RemoveContainer" containerID="aaa6d3e4956913218b62679ae156477bd1b1b93fca97ec5480a564e8c6bb01b1"
Nov 26 23:25:31 crc kubenswrapper[4903]: I1126 23:25:31.981383 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:25:31 crc kubenswrapper[4903]: I1126 23:25:31.981872 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:25:31 crc kubenswrapper[4903]: I1126 23:25:31.981914 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 23:25:31 crc kubenswrapper[4903]: I1126 23:25:31.982788 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 23:25:31 crc kubenswrapper[4903]: I1126 23:25:31.982831 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" gracePeriod=600
Nov 26 23:25:32 crc kubenswrapper[4903]: E1126 23:25:32.109266 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:25:32 crc kubenswrapper[4903]: I1126 23:25:32.561149 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" exitCode=0
Nov 26 23:25:32 crc kubenswrapper[4903]: I1126 23:25:32.561410 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"}
Nov 26 23:25:32 crc kubenswrapper[4903]: I1126 23:25:32.561951 4903 scope.go:117] "RemoveContainer" containerID="27d445047f53e9bdb50cf786946986bbc016581b4443a5826b3fe320ad548c3c"
Nov 26 23:25:32 crc kubenswrapper[4903]: I1126 23:25:32.563230 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:25:32 crc kubenswrapper[4903]: E1126 23:25:32.563649 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:25:47 crc kubenswrapper[4903]: I1126 23:25:47.028496 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:25:47 crc kubenswrapper[4903]: E1126 23:25:47.029127 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:26:02 crc kubenswrapper[4903]: I1126 23:26:02.041117 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:26:02 crc kubenswrapper[4903]: E1126 23:26:02.042239 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:26:13 crc kubenswrapper[4903]: I1126 23:26:13.029868 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:26:13 crc kubenswrapper[4903]: E1126 23:26:13.031863 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:26:24 crc kubenswrapper[4903]: I1126 23:26:24.035416 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:26:24 crc kubenswrapper[4903]: E1126 23:26:24.036614 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:26:35 crc kubenswrapper[4903]: I1126 23:26:35.028802 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:26:35 crc kubenswrapper[4903]: E1126 23:26:35.030322 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:26:47 crc kubenswrapper[4903]: I1126 23:26:47.029267 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:26:47 crc kubenswrapper[4903]: E1126 23:26:47.030051 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:27:02 crc kubenswrapper[4903]: I1126 23:27:02.036371 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:27:02 crc kubenswrapper[4903]: E1126 23:27:02.037229 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:27:13 crc kubenswrapper[4903]: I1126 23:27:13.029907 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:27:13 crc kubenswrapper[4903]: E1126 23:27:13.031083 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:27:24 crc kubenswrapper[4903]: I1126 23:27:24.029069 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:27:24 crc kubenswrapper[4903]: E1126 23:27:24.029880 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:27:35 crc kubenswrapper[4903]: I1126 23:27:35.031549 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:27:35 crc kubenswrapper[4903]: E1126 23:27:35.040319 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:27:49 crc kubenswrapper[4903]: I1126 23:27:49.031268 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:27:49 crc kubenswrapper[4903]: E1126 23:27:49.032602 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:28:03 crc kubenswrapper[4903]: I1126 23:28:03.029553 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
Nov 26 23:28:03 crc kubenswrapper[4903]: E1126 23:28:03.030486 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:28:16 crc kubenswrapper[4903]: I1126 23:28:16.029286 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6"
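[Annotation] The long run of "back-off 5m0s" errors above and below is the kubelet's crash-loop backoff at its cap: each failed restart roughly doubles the wait, up to a maximum. The 10s initial delay and 5m cap in the sketch are the commonly cited kubelet defaults, assumed rather than read from this cluster's configuration:

// Models the restart-delay growth behind CrashLoopBackOff.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay, max := 10*time.Second, 5*time.Minute // assumed kubelet defaults
	for i := 1; delay < max; i++ {
		fmt.Printf("restart %d: wait %s\n", i, delay)
		delay *= 2
	}
	fmt.Println("thereafter: wait", max) // matches the logged "back-off 5m0s"
}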
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:28:28 crc kubenswrapper[4903]: I1126 23:28:28.029452 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:28:28 crc kubenswrapper[4903]: E1126 23:28:28.030575 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:28:43 crc kubenswrapper[4903]: I1126 23:28:43.029619 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:28:43 crc kubenswrapper[4903]: E1126 23:28:43.031246 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.132828 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:28:44 crc kubenswrapper[4903]: E1126 23:28:44.134334 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="extract-content" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.134456 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="extract-content" Nov 26 23:28:44 crc kubenswrapper[4903]: E1126 23:28:44.134562 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="extract-utilities" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.134639 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="extract-utilities" Nov 26 23:28:44 crc kubenswrapper[4903]: E1126 23:28:44.134811 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="registry-server" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.134897 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="registry-server" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.135269 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a9c3908-6825-461f-894f-f187b429fece" containerName="registry-server" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.137576 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.149072 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.231417 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkzgj\" (UniqueName: \"kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.231474 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.232013 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.334292 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkzgj\" (UniqueName: \"kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.334360 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.334548 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.335314 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.335356 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.361930 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tkzgj\" (UniqueName: \"kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj\") pod \"redhat-operators-66kwf\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.471078 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:44 crc kubenswrapper[4903]: I1126 23:28:44.977143 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:28:45 crc kubenswrapper[4903]: I1126 23:28:45.349851 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b09231e-544a-4007-b9a7-8c75515749f9" containerID="4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512" exitCode=0 Nov 26 23:28:45 crc kubenswrapper[4903]: I1126 23:28:45.349911 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerDied","Data":"4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512"} Nov 26 23:28:45 crc kubenswrapper[4903]: I1126 23:28:45.350278 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerStarted","Data":"2997bb6e352b188404c7bc81e3629a974f402d1f2a72a357eaa0b2ff8b72339b"} Nov 26 23:28:45 crc kubenswrapper[4903]: I1126 23:28:45.351939 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:28:46 crc kubenswrapper[4903]: I1126 23:28:46.363683 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerStarted","Data":"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539"} Nov 26 23:28:50 crc kubenswrapper[4903]: I1126 23:28:50.426836 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b09231e-544a-4007-b9a7-8c75515749f9" containerID="9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539" exitCode=0 Nov 26 23:28:50 crc kubenswrapper[4903]: I1126 23:28:50.426898 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerDied","Data":"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539"} Nov 26 23:28:52 crc kubenswrapper[4903]: I1126 23:28:52.456846 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerStarted","Data":"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed"} Nov 26 23:28:52 crc kubenswrapper[4903]: I1126 23:28:52.480385 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-66kwf" podStartSLOduration=2.672837586 podStartE2EDuration="8.480364216s" podCreationTimestamp="2025-11-26 23:28:44 +0000 UTC" firstStartedPulling="2025-11-26 23:28:45.351634077 +0000 UTC m=+4054.041868987" lastFinishedPulling="2025-11-26 23:28:51.159160667 +0000 UTC m=+4059.849395617" observedRunningTime="2025-11-26 23:28:52.471616289 +0000 UTC m=+4061.161851199" watchObservedRunningTime="2025-11-26 23:28:52.480364216 +0000 UTC m=+4061.170599126" Nov 26 23:28:54 crc 
kubenswrapper[4903]: I1126 23:28:54.471300 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:54 crc kubenswrapper[4903]: I1126 23:28:54.471640 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:28:55 crc kubenswrapper[4903]: I1126 23:28:55.029054 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:28:55 crc kubenswrapper[4903]: E1126 23:28:55.029798 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:28:55 crc kubenswrapper[4903]: I1126 23:28:55.530632 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-66kwf" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" probeResult="failure" output=< Nov 26 23:28:55 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:28:55 crc kubenswrapper[4903]: > Nov 26 23:29:06 crc kubenswrapper[4903]: I1126 23:29:06.030376 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:29:06 crc kubenswrapper[4903]: E1126 23:29:06.031477 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:29:06 crc kubenswrapper[4903]: I1126 23:29:06.259806 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-66kwf" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" probeResult="failure" output=< Nov 26 23:29:06 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:29:06 crc kubenswrapper[4903]: > Nov 26 23:29:15 crc kubenswrapper[4903]: I1126 23:29:15.534746 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-66kwf" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" probeResult="failure" output=< Nov 26 23:29:15 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:29:15 crc kubenswrapper[4903]: > Nov 26 23:29:21 crc kubenswrapper[4903]: I1126 23:29:21.029103 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:29:21 crc kubenswrapper[4903]: E1126 23:29:21.031075 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
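[Annotation] The startup-probe output above ("timeout: failed to connect service \":50051\" within 1s") is a gRPC health check against the registry-server container; judging by the message format it comes from a grpc_health_probe-style exec probe, though the pod's manifest is not in this log. A minimal Go equivalent of that check:

// Probes a gRPC server's standard health service with a 1s deadline.
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
	if err != nil {
		fmt.Println("timeout: failed to connect service \":50051\" within 1s") // the logged failure mode
		return
	}
	defer conn.Close()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	fmt.Println(resp.GetStatus(), err) // SERVING once the catalog has finished loading
}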
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:29:24 crc kubenswrapper[4903]: I1126 23:29:24.538056 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:29:24 crc kubenswrapper[4903]: I1126 23:29:24.613287 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:29:24 crc kubenswrapper[4903]: I1126 23:29:24.791069 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:29:25 crc kubenswrapper[4903]: I1126 23:29:25.953358 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-66kwf" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" containerID="cri-o://8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed" gracePeriod=2 Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.547499 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.658643 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content\") pod \"4b09231e-544a-4007-b9a7-8c75515749f9\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.658720 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkzgj\" (UniqueName: \"kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj\") pod \"4b09231e-544a-4007-b9a7-8c75515749f9\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.658843 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities\") pod \"4b09231e-544a-4007-b9a7-8c75515749f9\" (UID: \"4b09231e-544a-4007-b9a7-8c75515749f9\") " Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.659742 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities" (OuterVolumeSpecName: "utilities") pod "4b09231e-544a-4007-b9a7-8c75515749f9" (UID: "4b09231e-544a-4007-b9a7-8c75515749f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.666597 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj" (OuterVolumeSpecName: "kube-api-access-tkzgj") pod "4b09231e-544a-4007-b9a7-8c75515749f9" (UID: "4b09231e-544a-4007-b9a7-8c75515749f9"). InnerVolumeSpecName "kube-api-access-tkzgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.758586 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b09231e-544a-4007-b9a7-8c75515749f9" (UID: "4b09231e-544a-4007-b9a7-8c75515749f9"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.761786 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.761820 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkzgj\" (UniqueName: \"kubernetes.io/projected/4b09231e-544a-4007-b9a7-8c75515749f9-kube-api-access-tkzgj\") on node \"crc\" DevicePath \"\"" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.761833 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b09231e-544a-4007-b9a7-8c75515749f9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.965663 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b09231e-544a-4007-b9a7-8c75515749f9" containerID="8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed" exitCode=0 Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.965755 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerDied","Data":"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed"} Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.966070 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66kwf" event={"ID":"4b09231e-544a-4007-b9a7-8c75515749f9","Type":"ContainerDied","Data":"2997bb6e352b188404c7bc81e3629a974f402d1f2a72a357eaa0b2ff8b72339b"} Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.965842 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-66kwf" Nov 26 23:29:26 crc kubenswrapper[4903]: I1126 23:29:26.966100 4903 scope.go:117] "RemoveContainer" containerID="8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.002218 4903 scope.go:117] "RemoveContainer" containerID="9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.013468 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.024753 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-66kwf"] Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.030294 4903 scope.go:117] "RemoveContainer" containerID="4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.102923 4903 scope.go:117] "RemoveContainer" containerID="8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed" Nov 26 23:29:27 crc kubenswrapper[4903]: E1126 23:29:27.103413 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed\": container with ID starting with 8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed not found: ID does not exist" containerID="8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.103445 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed"} err="failed to get container status \"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed\": rpc error: code = NotFound desc = could not find container \"8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed\": container with ID starting with 8bd04cda33b011a79507216670601f1ee3c94c71ba36c79aef43374020bf72ed not found: ID does not exist" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.103466 4903 scope.go:117] "RemoveContainer" containerID="9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539" Nov 26 23:29:27 crc kubenswrapper[4903]: E1126 23:29:27.104021 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539\": container with ID starting with 9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539 not found: ID does not exist" containerID="9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.104093 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539"} err="failed to get container status \"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539\": rpc error: code = NotFound desc = could not find container \"9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539\": container with ID starting with 9ceb36132ac0b3f54eacd993c9288a3ee1984f8c090196d173eb5600f6340539 not found: ID does not exist" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.104147 4903 scope.go:117] "RemoveContainer" 
containerID="4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512" Nov 26 23:29:27 crc kubenswrapper[4903]: E1126 23:29:27.104582 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512\": container with ID starting with 4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512 not found: ID does not exist" containerID="4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512" Nov 26 23:29:27 crc kubenswrapper[4903]: I1126 23:29:27.104658 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512"} err="failed to get container status \"4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512\": rpc error: code = NotFound desc = could not find container \"4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512\": container with ID starting with 4068f97bc04bc31452836524eecfa5ffef43c99c9eab3efbfa0755cf8841e512 not found: ID does not exist" Nov 26 23:29:28 crc kubenswrapper[4903]: I1126 23:29:28.041522 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" path="/var/lib/kubelet/pods/4b09231e-544a-4007-b9a7-8c75515749f9/volumes" Nov 26 23:29:33 crc kubenswrapper[4903]: I1126 23:29:33.029225 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:29:33 crc kubenswrapper[4903]: E1126 23:29:33.030374 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:29:46 crc kubenswrapper[4903]: I1126 23:29:46.028560 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:29:46 crc kubenswrapper[4903]: E1126 23:29:46.029164 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:29:57 crc kubenswrapper[4903]: I1126 23:29:57.030326 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:29:57 crc kubenswrapper[4903]: E1126 23:29:57.031142 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.151671 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr"] Nov 26 23:30:00 crc kubenswrapper[4903]: E1126 23:30:00.152788 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.152804 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" Nov 26 23:30:00 crc kubenswrapper[4903]: E1126 23:30:00.152847 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="extract-content" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.152856 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="extract-content" Nov 26 23:30:00 crc kubenswrapper[4903]: E1126 23:30:00.152892 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="extract-utilities" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.152900 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="extract-utilities" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.153190 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b09231e-544a-4007-b9a7-8c75515749f9" containerName="registry-server" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.154240 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.156725 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.156931 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.163470 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr"] Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.223094 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.224127 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.224203 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2twt\" (UniqueName: \"kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.326461 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.326752 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2twt\" (UniqueName: \"kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.326884 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.327648 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.332939 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.344283 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2twt\" (UniqueName: \"kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt\") pod \"collect-profiles-29403330-c78fr\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:00 crc kubenswrapper[4903]: I1126 23:30:00.482629 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:01 crc kubenswrapper[4903]: I1126 23:30:01.062465 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr"] Nov 26 23:30:01 crc kubenswrapper[4903]: I1126 23:30:01.413823 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" event={"ID":"54c8bbb7-0e41-4734-a723-33bf901ae7ea","Type":"ContainerStarted","Data":"01cbb4f88242d44d8a23e5730b3a62482d9e438f77b4ebc28e168909a47cf853"} Nov 26 23:30:01 crc kubenswrapper[4903]: I1126 23:30:01.414225 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" event={"ID":"54c8bbb7-0e41-4734-a723-33bf901ae7ea","Type":"ContainerStarted","Data":"c54d50070f3fad7e2de9dff8740cb44f844d0707dd00e511321bcd5a1e136a82"} Nov 26 23:30:01 crc kubenswrapper[4903]: I1126 23:30:01.442331 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" podStartSLOduration=1.442304926 podStartE2EDuration="1.442304926s" podCreationTimestamp="2025-11-26 23:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 23:30:01.437148607 +0000 UTC m=+4130.127383507" watchObservedRunningTime="2025-11-26 23:30:01.442304926 +0000 UTC m=+4130.132539846" Nov 26 23:30:01 crc kubenswrapper[4903]: E1126 23:30:01.813890 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54c8bbb7_0e41_4734_a723_33bf901ae7ea.slice/crio-conmon-01cbb4f88242d44d8a23e5730b3a62482d9e438f77b4ebc28e168909a47cf853.scope\": RecentStats: unable to find data in memory cache]" Nov 26 23:30:02 crc kubenswrapper[4903]: I1126 23:30:02.429871 4903 generic.go:334] "Generic (PLEG): container finished" podID="54c8bbb7-0e41-4734-a723-33bf901ae7ea" containerID="01cbb4f88242d44d8a23e5730b3a62482d9e438f77b4ebc28e168909a47cf853" exitCode=0 Nov 26 23:30:02 crc kubenswrapper[4903]: I1126 23:30:02.430003 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" event={"ID":"54c8bbb7-0e41-4734-a723-33bf901ae7ea","Type":"ContainerDied","Data":"01cbb4f88242d44d8a23e5730b3a62482d9e438f77b4ebc28e168909a47cf853"} Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.874279 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.909812 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2twt\" (UniqueName: \"kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt\") pod \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.909866 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume\") pod \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.910176 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume\") pod \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\" (UID: \"54c8bbb7-0e41-4734-a723-33bf901ae7ea\") " Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.910821 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume" (OuterVolumeSpecName: "config-volume") pod "54c8bbb7-0e41-4734-a723-33bf901ae7ea" (UID: "54c8bbb7-0e41-4734-a723-33bf901ae7ea"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.915843 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "54c8bbb7-0e41-4734-a723-33bf901ae7ea" (UID: "54c8bbb7-0e41-4734-a723-33bf901ae7ea"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:30:03 crc kubenswrapper[4903]: I1126 23:30:03.916205 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt" (OuterVolumeSpecName: "kube-api-access-j2twt") pod "54c8bbb7-0e41-4734-a723-33bf901ae7ea" (UID: "54c8bbb7-0e41-4734-a723-33bf901ae7ea"). InnerVolumeSpecName "kube-api-access-j2twt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.013310 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2twt\" (UniqueName: \"kubernetes.io/projected/54c8bbb7-0e41-4734-a723-33bf901ae7ea-kube-api-access-j2twt\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.013344 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/54c8bbb7-0e41-4734-a723-33bf901ae7ea-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.013354 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/54c8bbb7-0e41-4734-a723-33bf901ae7ea-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.462406 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" event={"ID":"54c8bbb7-0e41-4734-a723-33bf901ae7ea","Type":"ContainerDied","Data":"c54d50070f3fad7e2de9dff8740cb44f844d0707dd00e511321bcd5a1e136a82"} Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.462783 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c54d50070f3fad7e2de9dff8740cb44f844d0707dd00e511321bcd5a1e136a82" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.462579 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr" Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.526862 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm"] Nov 26 23:30:04 crc kubenswrapper[4903]: I1126 23:30:04.538395 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403285-qjtgm"] Nov 26 23:30:06 crc kubenswrapper[4903]: I1126 23:30:06.042183 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="380938eb-7cfc-4a1c-8710-bc2279ca6b82" path="/var/lib/kubelet/pods/380938eb-7cfc-4a1c-8710-bc2279ca6b82/volumes" Nov 26 23:30:12 crc kubenswrapper[4903]: I1126 23:30:12.044805 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:30:12 crc kubenswrapper[4903]: E1126 23:30:12.046092 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.620432 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:18 crc kubenswrapper[4903]: E1126 23:30:18.621756 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c8bbb7-0e41-4734-a723-33bf901ae7ea" containerName="collect-profiles" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.621794 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c8bbb7-0e41-4734-a723-33bf901ae7ea" containerName="collect-profiles" Nov 26 23:30:18 crc 
kubenswrapper[4903]: I1126 23:30:18.622157 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c8bbb7-0e41-4734-a723-33bf901ae7ea" containerName="collect-profiles" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.624602 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.641854 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.706632 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c22hc\" (UniqueName: \"kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.706890 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.707109 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.809200 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.809320 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c22hc\" (UniqueName: \"kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.809427 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.809803 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.809937 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.826519 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c22hc\" (UniqueName: \"kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc\") pod \"community-operators-bg7zh\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:18 crc kubenswrapper[4903]: I1126 23:30:18.958908 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:19 crc kubenswrapper[4903]: I1126 23:30:19.498641 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:19 crc kubenswrapper[4903]: I1126 23:30:19.676200 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerStarted","Data":"e5b72b3d9d241aefe9ac6b3381ac6f9ae10ed25bbc791397e752f73b84cdbc8a"} Nov 26 23:30:20 crc kubenswrapper[4903]: I1126 23:30:20.697910 4903 generic.go:334] "Generic (PLEG): container finished" podID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerID="a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed" exitCode=0 Nov 26 23:30:20 crc kubenswrapper[4903]: I1126 23:30:20.698018 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerDied","Data":"a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed"} Nov 26 23:30:22 crc kubenswrapper[4903]: I1126 23:30:22.730484 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerStarted","Data":"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380"} Nov 26 23:30:23 crc kubenswrapper[4903]: I1126 23:30:23.070867 4903 scope.go:117] "RemoveContainer" containerID="992cc963df3f71ef88966f1cfbd1be25ac845ce8c8601ee917b68f5d10de91ba" Nov 26 23:30:23 crc kubenswrapper[4903]: I1126 23:30:23.745608 4903 generic.go:334] "Generic (PLEG): container finished" podID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerID="0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380" exitCode=0 Nov 26 23:30:23 crc kubenswrapper[4903]: I1126 23:30:23.745970 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerDied","Data":"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380"} Nov 26 23:30:25 crc kubenswrapper[4903]: I1126 23:30:25.028969 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:30:25 crc kubenswrapper[4903]: E1126 23:30:25.029884 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:30:25 crc kubenswrapper[4903]: I1126 23:30:25.796055 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerStarted","Data":"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56"} Nov 26 23:30:25 crc kubenswrapper[4903]: I1126 23:30:25.831570 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bg7zh" podStartSLOduration=3.458588196 podStartE2EDuration="7.83155199s" podCreationTimestamp="2025-11-26 23:30:18 +0000 UTC" firstStartedPulling="2025-11-26 23:30:20.70124285 +0000 UTC m=+4149.391477770" lastFinishedPulling="2025-11-26 23:30:25.074206654 +0000 UTC m=+4153.764441564" observedRunningTime="2025-11-26 23:30:25.820309707 +0000 UTC m=+4154.510544647" watchObservedRunningTime="2025-11-26 23:30:25.83155199 +0000 UTC m=+4154.521786900" Nov 26 23:30:28 crc kubenswrapper[4903]: I1126 23:30:28.959222 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:28 crc kubenswrapper[4903]: I1126 23:30:28.960171 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:29 crc kubenswrapper[4903]: I1126 23:30:29.033601 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:38 crc kubenswrapper[4903]: I1126 23:30:38.029261 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:30:38 crc kubenswrapper[4903]: I1126 23:30:38.982757 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967"} Nov 26 23:30:39 crc kubenswrapper[4903]: I1126 23:30:39.048325 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:39 crc kubenswrapper[4903]: I1126 23:30:39.122484 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:39 crc kubenswrapper[4903]: I1126 23:30:39.994382 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bg7zh" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="registry-server" containerID="cri-o://0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56" gracePeriod=2 Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.516551 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.583837 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities\") pod \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.583944 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c22hc\" (UniqueName: \"kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc\") pod \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.584103 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content\") pod \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\" (UID: \"d16421f1-30a9-4960-88c7-a1bb896ea7b3\") " Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.584803 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities" (OuterVolumeSpecName: "utilities") pod "d16421f1-30a9-4960-88c7-a1bb896ea7b3" (UID: "d16421f1-30a9-4960-88c7-a1bb896ea7b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.590772 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc" (OuterVolumeSpecName: "kube-api-access-c22hc") pod "d16421f1-30a9-4960-88c7-a1bb896ea7b3" (UID: "d16421f1-30a9-4960-88c7-a1bb896ea7b3"). InnerVolumeSpecName "kube-api-access-c22hc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.648127 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d16421f1-30a9-4960-88c7-a1bb896ea7b3" (UID: "d16421f1-30a9-4960-88c7-a1bb896ea7b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.687422 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.687785 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c22hc\" (UniqueName: \"kubernetes.io/projected/d16421f1-30a9-4960-88c7-a1bb896ea7b3-kube-api-access-c22hc\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:40 crc kubenswrapper[4903]: I1126 23:30:40.687803 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d16421f1-30a9-4960-88c7-a1bb896ea7b3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.012959 4903 generic.go:334] "Generic (PLEG): container finished" podID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerID="0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56" exitCode=0 Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.013040 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerDied","Data":"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56"} Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.013092 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bg7zh" event={"ID":"d16421f1-30a9-4960-88c7-a1bb896ea7b3","Type":"ContainerDied","Data":"e5b72b3d9d241aefe9ac6b3381ac6f9ae10ed25bbc791397e752f73b84cdbc8a"} Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.013132 4903 scope.go:117] "RemoveContainer" containerID="0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.013403 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bg7zh" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.036796 4903 scope.go:117] "RemoveContainer" containerID="0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.069938 4903 scope.go:117] "RemoveContainer" containerID="a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.080513 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.106719 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bg7zh"] Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.165464 4903 scope.go:117] "RemoveContainer" containerID="0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56" Nov 26 23:30:41 crc kubenswrapper[4903]: E1126 23:30:41.166365 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56\": container with ID starting with 0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56 not found: ID does not exist" containerID="0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.166411 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56"} err="failed to get container status \"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56\": rpc error: code = NotFound desc = could not find container \"0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56\": container with ID starting with 0c44af46150037c2bcd98f99c1f61f30af4ecfd7b2a32dca779189f8e41d5f56 not found: ID does not exist" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.166440 4903 scope.go:117] "RemoveContainer" containerID="0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380" Nov 26 23:30:41 crc kubenswrapper[4903]: E1126 23:30:41.166982 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380\": container with ID starting with 0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380 not found: ID does not exist" containerID="0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.167007 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380"} err="failed to get container status \"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380\": rpc error: code = NotFound desc = could not find container \"0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380\": container with ID starting with 0f080c51af8dff862208f80bc0c636c928bc998d001fa8555f18d98670de4380 not found: ID does not exist" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.167020 4903 scope.go:117] "RemoveContainer" containerID="a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed" Nov 26 23:30:41 crc kubenswrapper[4903]: E1126 23:30:41.168740 4903 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed\": container with ID starting with a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed not found: ID does not exist" containerID="a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed" Nov 26 23:30:41 crc kubenswrapper[4903]: I1126 23:30:41.168827 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed"} err="failed to get container status \"a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed\": rpc error: code = NotFound desc = could not find container \"a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed\": container with ID starting with a80e770b521cc138e93f737330479b406d29dd8d40bae918e30e4523ccf7b6ed not found: ID does not exist" Nov 26 23:30:42 crc kubenswrapper[4903]: I1126 23:30:42.041354 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" path="/var/lib/kubelet/pods/d16421f1-30a9-4960-88c7-a1bb896ea7b3/volumes" Nov 26 23:32:43 crc kubenswrapper[4903]: E1126 23:32:43.219038 4903 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.219:40692->38.102.83.219:36831: read tcp 38.102.83.219:40692->38.102.83.219:36831: read: connection reset by peer Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.555296 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:32:58 crc kubenswrapper[4903]: E1126 23:32:58.557381 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="extract-content" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.557477 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="extract-content" Nov 26 23:32:58 crc kubenswrapper[4903]: E1126 23:32:58.557545 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="registry-server" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.557603 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="registry-server" Nov 26 23:32:58 crc kubenswrapper[4903]: E1126 23:32:58.557749 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="extract-utilities" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.557827 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="extract-utilities" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.558114 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d16421f1-30a9-4960-88c7-a1bb896ea7b3" containerName="registry-server" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.560076 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.570574 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.657889 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.658557 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.658826 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4psrc\" (UniqueName: \"kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.761043 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4psrc\" (UniqueName: \"kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.761212 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.761251 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.761794 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.761895 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.781139 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4psrc\" (UniqueName: \"kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc\") pod \"certified-operators-g5dp9\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:58 crc kubenswrapper[4903]: I1126 23:32:58.899416 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:32:59 crc kubenswrapper[4903]: I1126 23:32:59.433964 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:32:59 crc kubenswrapper[4903]: I1126 23:32:59.946048 4903 generic.go:334] "Generic (PLEG): container finished" podID="766f839b-36b4-4892-8d11-d3a5221c532d" containerID="bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb" exitCode=0 Nov 26 23:32:59 crc kubenswrapper[4903]: I1126 23:32:59.946378 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerDied","Data":"bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb"} Nov 26 23:32:59 crc kubenswrapper[4903]: I1126 23:32:59.946478 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerStarted","Data":"044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc"} Nov 26 23:33:01 crc kubenswrapper[4903]: I1126 23:33:01.972022 4903 generic.go:334] "Generic (PLEG): container finished" podID="766f839b-36b4-4892-8d11-d3a5221c532d" containerID="057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51" exitCode=0 Nov 26 23:33:01 crc kubenswrapper[4903]: I1126 23:33:01.972254 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerDied","Data":"057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51"} Nov 26 23:33:01 crc kubenswrapper[4903]: I1126 23:33:01.981434 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:33:01 crc kubenswrapper[4903]: I1126 23:33:01.981471 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:33:02 crc kubenswrapper[4903]: I1126 23:33:02.987523 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerStarted","Data":"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367"} Nov 26 23:33:03 crc kubenswrapper[4903]: I1126 23:33:03.009934 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g5dp9" podStartSLOduration=2.511738543 podStartE2EDuration="5.009910994s" podCreationTimestamp="2025-11-26 23:32:58 +0000 UTC" 
firstStartedPulling="2025-11-26 23:32:59.948140948 +0000 UTC m=+4308.638375858" lastFinishedPulling="2025-11-26 23:33:02.446313399 +0000 UTC m=+4311.136548309" observedRunningTime="2025-11-26 23:33:03.003184454 +0000 UTC m=+4311.693419384" watchObservedRunningTime="2025-11-26 23:33:03.009910994 +0000 UTC m=+4311.700145914" Nov 26 23:33:08 crc kubenswrapper[4903]: I1126 23:33:08.899908 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:08 crc kubenswrapper[4903]: I1126 23:33:08.900533 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:08 crc kubenswrapper[4903]: I1126 23:33:08.968036 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:09 crc kubenswrapper[4903]: I1126 23:33:09.120513 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:09 crc kubenswrapper[4903]: I1126 23:33:09.226025 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.101930 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g5dp9" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="registry-server" containerID="cri-o://acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367" gracePeriod=2 Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.738570 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.867832 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4psrc\" (UniqueName: \"kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc\") pod \"766f839b-36b4-4892-8d11-d3a5221c532d\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.867921 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities\") pod \"766f839b-36b4-4892-8d11-d3a5221c532d\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.868170 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content\") pod \"766f839b-36b4-4892-8d11-d3a5221c532d\" (UID: \"766f839b-36b4-4892-8d11-d3a5221c532d\") " Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.868926 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities" (OuterVolumeSpecName: "utilities") pod "766f839b-36b4-4892-8d11-d3a5221c532d" (UID: "766f839b-36b4-4892-8d11-d3a5221c532d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.881183 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc" (OuterVolumeSpecName: "kube-api-access-4psrc") pod "766f839b-36b4-4892-8d11-d3a5221c532d" (UID: "766f839b-36b4-4892-8d11-d3a5221c532d"). InnerVolumeSpecName "kube-api-access-4psrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.924795 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "766f839b-36b4-4892-8d11-d3a5221c532d" (UID: "766f839b-36b4-4892-8d11-d3a5221c532d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.972985 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.973045 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4psrc\" (UniqueName: \"kubernetes.io/projected/766f839b-36b4-4892-8d11-d3a5221c532d-kube-api-access-4psrc\") on node \"crc\" DevicePath \"\"" Nov 26 23:33:11 crc kubenswrapper[4903]: I1126 23:33:11.973065 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/766f839b-36b4-4892-8d11-d3a5221c532d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.116812 4903 generic.go:334] "Generic (PLEG): container finished" podID="766f839b-36b4-4892-8d11-d3a5221c532d" containerID="acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367" exitCode=0 Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.116847 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerDied","Data":"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367"} Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.116888 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5dp9" event={"ID":"766f839b-36b4-4892-8d11-d3a5221c532d","Type":"ContainerDied","Data":"044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc"} Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.116907 4903 scope.go:117] "RemoveContainer" containerID="acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.116934 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g5dp9" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.167254 4903 scope.go:117] "RemoveContainer" containerID="057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.168375 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.211137 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g5dp9"] Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.217549 4903 scope.go:117] "RemoveContainer" containerID="bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.263020 4903 scope.go:117] "RemoveContainer" containerID="acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367" Nov 26 23:33:12 crc kubenswrapper[4903]: E1126 23:33:12.264174 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367\": container with ID starting with acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367 not found: ID does not exist" containerID="acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.264212 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367"} err="failed to get container status \"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367\": rpc error: code = NotFound desc = could not find container \"acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367\": container with ID starting with acba29928fa2bffb3bd67b896319e944d8496218d7520f2920547c1b52142367 not found: ID does not exist" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.264260 4903 scope.go:117] "RemoveContainer" containerID="057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51" Nov 26 23:33:12 crc kubenswrapper[4903]: E1126 23:33:12.264639 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51\": container with ID starting with 057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51 not found: ID does not exist" containerID="057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.264674 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51"} err="failed to get container status \"057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51\": rpc error: code = NotFound desc = could not find container \"057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51\": container with ID starting with 057c5ecf7a96b8fc80ab9e83f8b4f99f0d8c2209e31ab79c310a43a355cfdb51 not found: ID does not exist" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.264707 4903 scope.go:117] "RemoveContainer" containerID="bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb" Nov 26 23:33:12 crc kubenswrapper[4903]: E1126 23:33:12.265168 4903 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb\": container with ID starting with bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb not found: ID does not exist" containerID="bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb" Nov 26 23:33:12 crc kubenswrapper[4903]: I1126 23:33:12.265226 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb"} err="failed to get container status \"bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb\": rpc error: code = NotFound desc = could not find container \"bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb\": container with ID starting with bebd357583f3d4975ee008d524851871cbd1b4491e7e1fe4127aff7e2e3cc4eb not found: ID does not exist" Nov 26 23:33:14 crc kubenswrapper[4903]: I1126 23:33:14.056614 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" path="/var/lib/kubelet/pods/766f839b-36b4-4892-8d11-d3a5221c532d/volumes" Nov 26 23:33:14 crc kubenswrapper[4903]: E1126 23:33:14.781823 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:15 crc kubenswrapper[4903]: E1126 23:33:15.894934 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:26 crc kubenswrapper[4903]: E1126 23:33:26.207427 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:29 crc kubenswrapper[4903]: E1126 23:33:29.547187 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:31 crc kubenswrapper[4903]: I1126 23:33:31.981816 4903 
patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:33:31 crc kubenswrapper[4903]: I1126 23:33:31.982189 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:33:36 crc kubenswrapper[4903]: E1126 23:33:36.522540 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:44 crc kubenswrapper[4903]: E1126 23:33:44.841577 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:46 crc kubenswrapper[4903]: E1126 23:33:46.569934 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:48 crc kubenswrapper[4903]: E1126 23:33:48.105345 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:48 crc kubenswrapper[4903]: E1126 23:33:48.107095 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:54 crc 
kubenswrapper[4903]: E1126 23:33:54.440553 4903 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.219:52576->38.102.83.219:36831: read tcp 38.102.83.219:52576->38.102.83.219:36831: read: connection reset by peer Nov 26 23:33:56 crc kubenswrapper[4903]: E1126 23:33:56.849444 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache]" Nov 26 23:33:59 crc kubenswrapper[4903]: E1126 23:33:59.550843 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:34:01 crc kubenswrapper[4903]: I1126 23:34:01.980993 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:34:01 crc kubenswrapper[4903]: I1126 23:34:01.981573 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:34:01 crc kubenswrapper[4903]: I1126 23:34:01.981620 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:34:01 crc kubenswrapper[4903]: I1126 23:34:01.982470 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:34:01 crc kubenswrapper[4903]: I1126 23:34:01.982514 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967" gracePeriod=600 Nov 26 23:34:02 crc kubenswrapper[4903]: I1126 23:34:02.816807 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967" exitCode=0 Nov 26 23:34:02 crc kubenswrapper[4903]: I1126 23:34:02.816871 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967"} Nov 26 23:34:02 crc kubenswrapper[4903]: I1126 23:34:02.817403 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"} Nov 26 23:34:02 crc kubenswrapper[4903]: I1126 23:34:02.817553 4903 scope.go:117] "RemoveContainer" containerID="acfd4a2ae7045f3f2861931605903a0b18e4feb51ce042e8d16ef829eb71a8d6" Nov 26 23:34:06 crc kubenswrapper[4903]: E1126 23:34:06.893804 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice/crio-044dad80c5d1245910c3a06c24c3ac15067f95c54ce58097d41bde7292f936fc\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod766f839b_36b4_4892_8d11_d3a5221c532d.slice\": RecentStats: unable to find data in memory cache]" Nov 26 23:34:12 crc kubenswrapper[4903]: E1126 23:34:12.084926 4903 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/46536e53ede0eaa8e2b25280c2826cf1e522c88faf1c22b804c553b07498c20d/diff" to get inode usage: stat /var/lib/containers/storage/overlay/46536e53ede0eaa8e2b25280c2826cf1e522c88faf1c22b804c553b07498c20d/diff: no such file or directory, extraDiskErr: Nov 26 23:36:31 crc kubenswrapper[4903]: I1126 23:36:31.981918 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:36:31 crc kubenswrapper[4903]: I1126 23:36:31.982844 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:37:01 crc kubenswrapper[4903]: I1126 23:37:01.980962 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:37:01 crc kubenswrapper[4903]: I1126 23:37:01.981632 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.357987 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:14 crc kubenswrapper[4903]: E1126 23:37:14.359505 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="registry-server" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.359524 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="registry-server" Nov 26 23:37:14 crc kubenswrapper[4903]: E1126 23:37:14.359973 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="extract-content" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.359987 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="extract-content" Nov 26 23:37:14 crc kubenswrapper[4903]: E1126 23:37:14.360050 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="extract-utilities" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.360062 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="extract-utilities" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.360383 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="766f839b-36b4-4892-8d11-d3a5221c532d" containerName="registry-server" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.363882 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.395171 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.509628 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.509726 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqt54\" (UniqueName: \"kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.509802 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.611624 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.611752 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqt54\" (UniqueName: \"kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54\") pod 
\"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.611856 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.612311 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.612305 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.629674 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqt54\" (UniqueName: \"kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54\") pod \"redhat-marketplace-qftwc\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:14 crc kubenswrapper[4903]: I1126 23:37:14.703079 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:15 crc kubenswrapper[4903]: I1126 23:37:15.193285 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:15 crc kubenswrapper[4903]: I1126 23:37:15.507725 4903 generic.go:334] "Generic (PLEG): container finished" podID="081461ec-b191-470c-b882-ae4772795f26" containerID="9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399" exitCode=0 Nov 26 23:37:15 crc kubenswrapper[4903]: I1126 23:37:15.507971 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerDied","Data":"9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399"} Nov 26 23:37:15 crc kubenswrapper[4903]: I1126 23:37:15.508001 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerStarted","Data":"025f0ff4561e05fce90261fd88870e6030bbb6f7ea074842ce93547b2c0da7f2"} Nov 26 23:37:15 crc kubenswrapper[4903]: I1126 23:37:15.511508 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:37:17 crc kubenswrapper[4903]: I1126 23:37:17.543568 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerStarted","Data":"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b"} Nov 26 23:37:18 crc kubenswrapper[4903]: I1126 23:37:18.565553 4903 generic.go:334] "Generic (PLEG): container finished" podID="081461ec-b191-470c-b882-ae4772795f26" containerID="3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b" exitCode=0 Nov 26 23:37:18 crc kubenswrapper[4903]: I1126 23:37:18.565622 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerDied","Data":"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b"} Nov 26 23:37:20 crc kubenswrapper[4903]: I1126 23:37:20.599068 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerStarted","Data":"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f"} Nov 26 23:37:20 crc kubenswrapper[4903]: I1126 23:37:20.628219 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qftwc" podStartSLOduration=2.808124582 podStartE2EDuration="6.62818811s" podCreationTimestamp="2025-11-26 23:37:14 +0000 UTC" firstStartedPulling="2025-11-26 23:37:15.510075058 +0000 UTC m=+4564.200309968" lastFinishedPulling="2025-11-26 23:37:19.330138556 +0000 UTC m=+4568.020373496" observedRunningTime="2025-11-26 23:37:20.624248514 +0000 UTC m=+4569.314483464" watchObservedRunningTime="2025-11-26 23:37:20.62818811 +0000 UTC m=+4569.318423060" Nov 26 23:37:24 crc kubenswrapper[4903]: I1126 23:37:24.703748 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:24 crc kubenswrapper[4903]: I1126 23:37:24.704349 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 
23:37:24 crc kubenswrapper[4903]: I1126 23:37:24.798405 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:25 crc kubenswrapper[4903]: I1126 23:37:25.751815 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:25 crc kubenswrapper[4903]: I1126 23:37:25.819822 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:27 crc kubenswrapper[4903]: I1126 23:37:27.701136 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qftwc" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="registry-server" containerID="cri-o://e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f" gracePeriod=2 Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.273896 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.402684 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqt54\" (UniqueName: \"kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54\") pod \"081461ec-b191-470c-b882-ae4772795f26\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.402797 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities\") pod \"081461ec-b191-470c-b882-ae4772795f26\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.402881 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content\") pod \"081461ec-b191-470c-b882-ae4772795f26\" (UID: \"081461ec-b191-470c-b882-ae4772795f26\") " Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.403784 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities" (OuterVolumeSpecName: "utilities") pod "081461ec-b191-470c-b882-ae4772795f26" (UID: "081461ec-b191-470c-b882-ae4772795f26"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.410400 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54" (OuterVolumeSpecName: "kube-api-access-qqt54") pod "081461ec-b191-470c-b882-ae4772795f26" (UID: "081461ec-b191-470c-b882-ae4772795f26"). InnerVolumeSpecName "kube-api-access-qqt54". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.428135 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "081461ec-b191-470c-b882-ae4772795f26" (UID: "081461ec-b191-470c-b882-ae4772795f26"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.506528 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqt54\" (UniqueName: \"kubernetes.io/projected/081461ec-b191-470c-b882-ae4772795f26-kube-api-access-qqt54\") on node \"crc\" DevicePath \"\"" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.506571 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.506587 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/081461ec-b191-470c-b882-ae4772795f26-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.719423 4903 generic.go:334] "Generic (PLEG): container finished" podID="081461ec-b191-470c-b882-ae4772795f26" containerID="e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f" exitCode=0 Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.719714 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerDied","Data":"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f"} Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.719741 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qftwc" event={"ID":"081461ec-b191-470c-b882-ae4772795f26","Type":"ContainerDied","Data":"025f0ff4561e05fce90261fd88870e6030bbb6f7ea074842ce93547b2c0da7f2"} Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.719757 4903 scope.go:117] "RemoveContainer" containerID="e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.719906 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qftwc" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.747995 4903 scope.go:117] "RemoveContainer" containerID="3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.778834 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.788883 4903 scope.go:117] "RemoveContainer" containerID="9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.789943 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qftwc"] Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.854832 4903 scope.go:117] "RemoveContainer" containerID="e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f" Nov 26 23:37:28 crc kubenswrapper[4903]: E1126 23:37:28.855727 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f\": container with ID starting with e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f not found: ID does not exist" containerID="e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.855784 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f"} err="failed to get container status \"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f\": rpc error: code = NotFound desc = could not find container \"e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f\": container with ID starting with e6e63fc0c179b6c938df041cf0d6ce696b47799f071cf2b8267ffbaa7e8be58f not found: ID does not exist" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.855822 4903 scope.go:117] "RemoveContainer" containerID="3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b" Nov 26 23:37:28 crc kubenswrapper[4903]: E1126 23:37:28.856290 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b\": container with ID starting with 3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b not found: ID does not exist" containerID="3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.856344 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b"} err="failed to get container status \"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b\": rpc error: code = NotFound desc = could not find container \"3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b\": container with ID starting with 3622e966d22c2431ea67ff8936b54985f9c5ac74f12891a81081df2bd8c70e2b not found: ID does not exist" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.856384 4903 scope.go:117] "RemoveContainer" containerID="9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399" Nov 26 23:37:28 crc kubenswrapper[4903]: E1126 23:37:28.856949 4903 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399\": container with ID starting with 9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399 not found: ID does not exist" containerID="9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399" Nov 26 23:37:28 crc kubenswrapper[4903]: I1126 23:37:28.856983 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399"} err="failed to get container status \"9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399\": rpc error: code = NotFound desc = could not find container \"9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399\": container with ID starting with 9d1c8fd6c67b0a20c84beda314da2ca17c3e4f732f2483263e2f1f5bffbdd399 not found: ID does not exist" Nov 26 23:37:30 crc kubenswrapper[4903]: I1126 23:37:30.047684 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="081461ec-b191-470c-b882-ae4772795f26" path="/var/lib/kubelet/pods/081461ec-b191-470c-b882-ae4772795f26/volumes" Nov 26 23:37:31 crc kubenswrapper[4903]: I1126 23:37:31.981288 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:37:31 crc kubenswrapper[4903]: I1126 23:37:31.981803 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:37:31 crc kubenswrapper[4903]: I1126 23:37:31.981876 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:37:31 crc kubenswrapper[4903]: I1126 23:37:31.982946 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:37:31 crc kubenswrapper[4903]: I1126 23:37:31.983043 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" gracePeriod=600 Nov 26 23:37:32 crc kubenswrapper[4903]: E1126 23:37:32.234221 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:37:32 crc kubenswrapper[4903]: I1126 23:37:32.790230 4903 generic.go:334] 
"Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" exitCode=0 Nov 26 23:37:32 crc kubenswrapper[4903]: I1126 23:37:32.790290 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"} Nov 26 23:37:32 crc kubenswrapper[4903]: I1126 23:37:32.790348 4903 scope.go:117] "RemoveContainer" containerID="534fbdcf6d0ec22c7d2eaa85f7e96a569f1e62972595e741c0ca79bbdc1f5967" Nov 26 23:37:32 crc kubenswrapper[4903]: I1126 23:37:32.790930 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:37:32 crc kubenswrapper[4903]: E1126 23:37:32.791432 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:37:46 crc kubenswrapper[4903]: I1126 23:37:46.028596 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:37:46 crc kubenswrapper[4903]: E1126 23:37:46.029275 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:01 crc kubenswrapper[4903]: I1126 23:38:01.029631 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:38:01 crc kubenswrapper[4903]: E1126 23:38:01.031337 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:12 crc kubenswrapper[4903]: I1126 23:38:12.035871 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:38:12 crc kubenswrapper[4903]: E1126 23:38:12.036454 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:25 crc kubenswrapper[4903]: I1126 23:38:25.028322 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" 
Nov 26 23:38:25 crc kubenswrapper[4903]: E1126 23:38:25.029472 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:36 crc kubenswrapper[4903]: I1126 23:38:36.029657 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:38:36 crc kubenswrapper[4903]: E1126 23:38:36.031046 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:49 crc kubenswrapper[4903]: I1126 23:38:49.029255 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:38:49 crc kubenswrapper[4903]: E1126 23:38:49.030324 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.236902 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 23:38:59 crc kubenswrapper[4903]: E1126 23:38:59.238785 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="registry-server" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.238822 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="registry-server" Nov 26 23:38:59 crc kubenswrapper[4903]: E1126 23:38:59.238883 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="extract-utilities" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.238900 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="extract-utilities" Nov 26 23:38:59 crc kubenswrapper[4903]: E1126 23:38:59.238975 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="extract-content" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.238993 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="extract-content" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.239555 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="081461ec-b191-470c-b882-ae4772795f26" containerName="registry-server" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.241498 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.244348 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.246042 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.246234 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-q4vxt" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.246250 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.259022 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344113 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344189 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344239 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344343 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344861 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.344922 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.345094 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.345464 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d4b7\" (UniqueName: \"kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.345566 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.447815 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.447876 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.447907 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.448025 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.448079 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.448101 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449111 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d4b7\" (UniqueName: \"kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7\") pod \"tempest-tests-tempest\" (UID: 
\"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449149 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.448468 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449257 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449256 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449318 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.449579 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.450335 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.455498 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.455744 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 
23:38:59.459354 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.475415 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d4b7\" (UniqueName: \"kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.487657 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"tempest-tests-tempest\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") " pod="openstack/tempest-tests-tempest" Nov 26 23:38:59 crc kubenswrapper[4903]: I1126 23:38:59.577465 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 23:39:00 crc kubenswrapper[4903]: I1126 23:39:00.093739 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 23:39:00 crc kubenswrapper[4903]: I1126 23:39:00.957473 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"47c0a41f-61f3-4e6c-8367-a25c5a75d02b","Type":"ContainerStarted","Data":"56fd6ff5b657fbd9c84224c202687f125b4a044f13b80a30c22d4eb0781e53dd"} Nov 26 23:39:03 crc kubenswrapper[4903]: I1126 23:39:03.028774 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:39:03 crc kubenswrapper[4903]: E1126 23:39:03.029561 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.459358 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"] Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.462745 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.470260 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"] Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.591806 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.591874 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.591935 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6t9v\" (UniqueName: \"kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.694298 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.694391 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.694481 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6t9v\" (UniqueName: \"kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.694863 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.694925 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.715346 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-g6t9v\" (UniqueName: \"kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v\") pod \"redhat-operators-mm6j8\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") " pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:15 crc kubenswrapper[4903]: I1126 23:39:15.788068 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6j8" Nov 26 23:39:16 crc kubenswrapper[4903]: I1126 23:39:16.029360 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:39:16 crc kubenswrapper[4903]: E1126 23:39:16.029729 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:39:31 crc kubenswrapper[4903]: I1126 23:39:31.028636 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:39:31 crc kubenswrapper[4903]: E1126 23:39:31.029968 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:39:31 crc kubenswrapper[4903]: E1126 23:39:31.425896 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 26 23:39:31 crc kubenswrapper[4903]: E1126 23:39:31.427631 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7d4b7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(47c0a41f-61f3-4e6c-8367-a25c5a75d02b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 23:39:31 crc kubenswrapper[4903]: E1126 23:39:31.429014 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" Nov 26 23:39:32 crc kubenswrapper[4903]: I1126 23:39:32.054415 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"] Nov 26 23:39:32 crc kubenswrapper[4903]: I1126 23:39:32.386302 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerStarted","Data":"101f4434a7f1859d90779ecbaef7ac35efab33b334c929ecd9732dac2b3b8fd4"} Nov 26 23:39:32 crc kubenswrapper[4903]: E1126 23:39:32.388283 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" Nov 26 23:39:33 crc kubenswrapper[4903]: I1126 23:39:33.405315 4903 generic.go:334] "Generic (PLEG): container finished" podID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerID="d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c" exitCode=0 Nov 26 23:39:33 crc kubenswrapper[4903]: I1126 23:39:33.405406 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerDied","Data":"d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c"} Nov 26 23:39:34 crc kubenswrapper[4903]: I1126 23:39:34.419774 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerStarted","Data":"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"} Nov 26 23:39:38 crc kubenswrapper[4903]: I1126 23:39:38.513537 4903 generic.go:334] "Generic (PLEG): container finished" podID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerID="ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7" exitCode=0 Nov 26 23:39:38 crc kubenswrapper[4903]: I1126 23:39:38.513633 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerDied","Data":"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"} Nov 26 23:39:39 crc kubenswrapper[4903]: I1126 23:39:39.529553 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerStarted","Data":"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"} Nov 26 23:39:39 crc kubenswrapper[4903]: I1126 23:39:39.558348 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mm6j8" podStartSLOduration=18.910430102 podStartE2EDuration="24.558325005s" podCreationTimestamp="2025-11-26 23:39:15 +0000 UTC" firstStartedPulling="2025-11-26 23:39:33.408530255 +0000 UTC m=+4702.098765205" lastFinishedPulling="2025-11-26 23:39:39.056425158 +0000 UTC m=+4707.746660108" observedRunningTime="2025-11-26 23:39:39.555056986 +0000 UTC m=+4708.245291906" watchObservedRunningTime="2025-11-26 23:39:39.558325005 +0000 UTC m=+4708.248559925" Nov 26 23:39:44 crc kubenswrapper[4903]: I1126 23:39:44.029161 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" 
Nov 26 23:39:44 crc kubenswrapper[4903]: E1126 23:39:44.029975 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:39:45 crc kubenswrapper[4903]: I1126 23:39:45.788242 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:39:45 crc kubenswrapper[4903]: I1126 23:39:45.788896 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:39:46 crc kubenswrapper[4903]: I1126 23:39:46.532072 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 26 23:39:47 crc kubenswrapper[4903]: I1126 23:39:47.358089 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mm6j8" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" probeResult="failure" output=<
Nov 26 23:39:47 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 23:39:47 crc kubenswrapper[4903]: >
Nov 26 23:39:48 crc kubenswrapper[4903]: I1126 23:39:48.654411 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"47c0a41f-61f3-4e6c-8367-a25c5a75d02b","Type":"ContainerStarted","Data":"68e2d88354572b9806a86031cc200cba2198e8427bb0206fbbf738a7eef73da7"}
Nov 26 23:39:48 crc kubenswrapper[4903]: I1126 23:39:48.698451 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.270153088 podStartE2EDuration="50.698426735s" podCreationTimestamp="2025-11-26 23:38:58 +0000 UTC" firstStartedPulling="2025-11-26 23:39:00.101066658 +0000 UTC m=+4668.791301568" lastFinishedPulling="2025-11-26 23:39:46.529340295 +0000 UTC m=+4715.219575215" observedRunningTime="2025-11-26 23:39:48.683328521 +0000 UTC m=+4717.373563441" watchObservedRunningTime="2025-11-26 23:39:48.698426735 +0000 UTC m=+4717.388661645"
Nov 26 23:39:57 crc kubenswrapper[4903]: I1126 23:39:57.028772 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:39:57 crc kubenswrapper[4903]: E1126 23:39:57.030001 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:39:57 crc kubenswrapper[4903]: I1126 23:39:57.743846 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mm6j8" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" probeResult="failure" output=<
Nov 26 23:39:57 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 23:39:57 crc kubenswrapper[4903]: >
Nov 26 23:40:05 crc kubenswrapper[4903]: I1126 23:40:05.872139 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:40:05 crc kubenswrapper[4903]: I1126 23:40:05.943388 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:40:06 crc kubenswrapper[4903]: I1126 23:40:06.121086 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"]
Nov 26 23:40:07 crc kubenswrapper[4903]: I1126 23:40:07.894291 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mm6j8" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" containerID="cri-o://c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52" gracePeriod=2
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.527514 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.646516 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6t9v\" (UniqueName: \"kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v\") pod \"349bec09-75e8-473c-a578-fd2c6d68e6ab\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") "
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.646957 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities\") pod \"349bec09-75e8-473c-a578-fd2c6d68e6ab\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") "
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.647082 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content\") pod \"349bec09-75e8-473c-a578-fd2c6d68e6ab\" (UID: \"349bec09-75e8-473c-a578-fd2c6d68e6ab\") "
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.647843 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities" (OuterVolumeSpecName: "utilities") pod "349bec09-75e8-473c-a578-fd2c6d68e6ab" (UID: "349bec09-75e8-473c-a578-fd2c6d68e6ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.648130 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.657015 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v" (OuterVolumeSpecName: "kube-api-access-g6t9v") pod "349bec09-75e8-473c-a578-fd2c6d68e6ab" (UID: "349bec09-75e8-473c-a578-fd2c6d68e6ab"). InnerVolumeSpecName "kube-api-access-g6t9v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.734033 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "349bec09-75e8-473c-a578-fd2c6d68e6ab" (UID: "349bec09-75e8-473c-a578-fd2c6d68e6ab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.750372 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6t9v\" (UniqueName: \"kubernetes.io/projected/349bec09-75e8-473c-a578-fd2c6d68e6ab-kube-api-access-g6t9v\") on node \"crc\" DevicePath \"\""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.751808 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349bec09-75e8-473c-a578-fd2c6d68e6ab-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.909912 4903 generic.go:334] "Generic (PLEG): container finished" podID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerID="c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52" exitCode=0
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.909959 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerDied","Data":"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"}
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.909986 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6j8" event={"ID":"349bec09-75e8-473c-a578-fd2c6d68e6ab","Type":"ContainerDied","Data":"101f4434a7f1859d90779ecbaef7ac35efab33b334c929ecd9732dac2b3b8fd4"}
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.910005 4903 scope.go:117] "RemoveContainer" containerID="c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.910073 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6j8"
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.943554 4903 scope.go:117] "RemoveContainer" containerID="ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"
Nov 26 23:40:08 crc kubenswrapper[4903]: I1126 23:40:08.992197 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"]
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.021991 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mm6j8"]
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.608044 4903 scope.go:117] "RemoveContainer" containerID="d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.682474 4903 scope.go:117] "RemoveContainer" containerID="c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"
Nov 26 23:40:09 crc kubenswrapper[4903]: E1126 23:40:09.683001 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52\": container with ID starting with c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52 not found: ID does not exist" containerID="c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.683047 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52"} err="failed to get container status \"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52\": rpc error: code = NotFound desc = could not find container \"c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52\": container with ID starting with c5c10a817759a40717764d66880a03fa91d881e222c31f79ebc3af3c5a872d52 not found: ID does not exist"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.683072 4903 scope.go:117] "RemoveContainer" containerID="ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"
Nov 26 23:40:09 crc kubenswrapper[4903]: E1126 23:40:09.683594 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7\": container with ID starting with ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7 not found: ID does not exist" containerID="ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.683667 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7"} err="failed to get container status \"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7\": rpc error: code = NotFound desc = could not find container \"ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7\": container with ID starting with ca2bd139253232f1d639c5fd2f1987ed0a23da9308063e95bfc39b1d1c6895e7 not found: ID does not exist"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.683749 4903 scope.go:117] "RemoveContainer" containerID="d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c"
Nov 26 23:40:09 crc kubenswrapper[4903]: E1126 23:40:09.684128 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c\": container with ID starting with d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c not found: ID does not exist" containerID="d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c"
Nov 26 23:40:09 crc kubenswrapper[4903]: I1126 23:40:09.684186 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c"} err="failed to get container status \"d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c\": rpc error: code = NotFound desc = could not find container \"d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c\": container with ID starting with d4bcb208648bf24863d74b3c2daf4ace5885ca531af718dd57fc65ecc079cd1c not found: ID does not exist"
Nov 26 23:40:10 crc kubenswrapper[4903]: I1126 23:40:10.030925 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:40:10 crc kubenswrapper[4903]: E1126 23:40:10.033568 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:40:10 crc kubenswrapper[4903]: I1126 23:40:10.065421 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" path="/var/lib/kubelet/pods/349bec09-75e8-473c-a578-fd2c6d68e6ab/volumes"
Nov 26 23:40:21 crc kubenswrapper[4903]: I1126 23:40:21.029299 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:40:21 crc kubenswrapper[4903]: E1126 23:40:21.030265 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:40:33 crc kubenswrapper[4903]: I1126 23:40:33.029170 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:40:33 crc kubenswrapper[4903]: E1126 23:40:33.030204 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:41:00 crc kubenswrapper[4903]: I1126 23:41:00.029447 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:41:00 crc kubenswrapper[4903]: E1126 23:41:00.030188 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:41:13 crc kubenswrapper[4903]: I1126 23:41:13.032582 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:41:13 crc kubenswrapper[4903]: E1126 23:41:13.034515 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.179156 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-srwkr"] Nov 26 23:41:21 crc kubenswrapper[4903]: E1126 23:41:21.180231 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.180247 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" Nov 26 23:41:21 crc kubenswrapper[4903]: E1126 23:41:21.180265 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="extract-utilities" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.180272 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="extract-utilities" Nov 26 23:41:21 crc kubenswrapper[4903]: E1126 23:41:21.180286 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="extract-content" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.180291 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="extract-content" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.180584 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="349bec09-75e8-473c-a578-fd2c6d68e6ab" containerName="registry-server" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.182260 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.224950 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srwkr"] Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.233926 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.234130 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8szkm\" (UniqueName: \"kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.234221 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.343647 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8szkm\" (UniqueName: \"kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.343745 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.343782 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.346556 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.346661 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.371157 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8szkm\" (UniqueName: \"kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm\") pod \"community-operators-srwkr\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") " pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:21 crc kubenswrapper[4903]: I1126 23:41:21.505855 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srwkr" Nov 26 23:41:22 crc kubenswrapper[4903]: I1126 23:41:22.189868 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srwkr"] Nov 26 23:41:23 crc kubenswrapper[4903]: I1126 23:41:23.810365 4903 generic.go:334] "Generic (PLEG): container finished" podID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerID="5c8b6c2e26245a5267ffcf92c30009d7d8b5ddd7fcd64ab2816136f19c3678d1" exitCode=0 Nov 26 23:41:23 crc kubenswrapper[4903]: I1126 23:41:23.810466 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerDied","Data":"5c8b6c2e26245a5267ffcf92c30009d7d8b5ddd7fcd64ab2816136f19c3678d1"} Nov 26 23:41:23 crc kubenswrapper[4903]: I1126 23:41:23.810898 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerStarted","Data":"733c3e65adba385f78bbc256873fb1959d1cca018a343f7489487b5fdd515ce1"} Nov 26 23:41:24 crc kubenswrapper[4903]: I1126 23:41:24.822851 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerStarted","Data":"e0faf4e2eb57ea0204e91efb80414849be42268ccbe85d86dded726dfe158487"} Nov 26 23:41:25 crc kubenswrapper[4903]: I1126 23:41:25.028909 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:41:25 crc kubenswrapper[4903]: E1126 23:41:25.029324 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:41:26 crc kubenswrapper[4903]: I1126 23:41:26.846092 4903 generic.go:334] "Generic (PLEG): container finished" podID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerID="e0faf4e2eb57ea0204e91efb80414849be42268ccbe85d86dded726dfe158487" exitCode=0 Nov 26 23:41:26 crc kubenswrapper[4903]: I1126 23:41:26.846163 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerDied","Data":"e0faf4e2eb57ea0204e91efb80414849be42268ccbe85d86dded726dfe158487"} Nov 26 23:41:27 crc kubenswrapper[4903]: I1126 23:41:27.861471 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerStarted","Data":"3ea3115cadc76f3d70932543401dc15b4bef279a449b5252c78c566ff41e6d1c"} Nov 26 23:41:27 crc kubenswrapper[4903]: I1126 23:41:27.886335 4903 
Nov 26 23:41:27 crc kubenswrapper[4903]: I1126 23:41:27.886335 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-srwkr" podStartSLOduration=3.371487367 podStartE2EDuration="6.886317548s" podCreationTimestamp="2025-11-26 23:41:21 +0000 UTC" firstStartedPulling="2025-11-26 23:41:23.813728244 +0000 UTC m=+4812.503963154" lastFinishedPulling="2025-11-26 23:41:27.328558425 +0000 UTC m=+4816.018793335" observedRunningTime="2025-11-26 23:41:27.876788743 +0000 UTC m=+4816.567023653" watchObservedRunningTime="2025-11-26 23:41:27.886317548 +0000 UTC m=+4816.576552458"
Nov 26 23:41:31 crc kubenswrapper[4903]: I1126 23:41:31.506875 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:31 crc kubenswrapper[4903]: I1126 23:41:31.507513 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:32 crc kubenswrapper[4903]: I1126 23:41:32.558270 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-srwkr" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="registry-server" probeResult="failure" output=<
Nov 26 23:41:32 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 26 23:41:32 crc kubenswrapper[4903]: >
Nov 26 23:41:39 crc kubenswrapper[4903]: I1126 23:41:39.029298 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:41:39 crc kubenswrapper[4903]: E1126 23:41:39.030231 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:41:41 crc kubenswrapper[4903]: I1126 23:41:41.575365 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:41 crc kubenswrapper[4903]: I1126 23:41:41.639753 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:41 crc kubenswrapper[4903]: I1126 23:41:41.829878 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-srwkr"]
Nov 26 23:41:43 crc kubenswrapper[4903]: I1126 23:41:43.042632 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-srwkr" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="registry-server" containerID="cri-o://3ea3115cadc76f3d70932543401dc15b4bef279a449b5252c78c566ff41e6d1c" gracePeriod=2
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.081286 4903 generic.go:334] "Generic (PLEG): container finished" podID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerID="3ea3115cadc76f3d70932543401dc15b4bef279a449b5252c78c566ff41e6d1c" exitCode=0
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.081537 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerDied","Data":"3ea3115cadc76f3d70932543401dc15b4bef279a449b5252c78c566ff41e6d1c"}
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.522800 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.614267 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content\") pod \"8eed55d6-2272-4d5e-aed9-15d33845a61b\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") "
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.614354 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities\") pod \"8eed55d6-2272-4d5e-aed9-15d33845a61b\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") "
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.614473 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8szkm\" (UniqueName: \"kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm\") pod \"8eed55d6-2272-4d5e-aed9-15d33845a61b\" (UID: \"8eed55d6-2272-4d5e-aed9-15d33845a61b\") "
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.617788 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities" (OuterVolumeSpecName: "utilities") pod "8eed55d6-2272-4d5e-aed9-15d33845a61b" (UID: "8eed55d6-2272-4d5e-aed9-15d33845a61b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.668299 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm" (OuterVolumeSpecName: "kube-api-access-8szkm") pod "8eed55d6-2272-4d5e-aed9-15d33845a61b" (UID: "8eed55d6-2272-4d5e-aed9-15d33845a61b"). InnerVolumeSpecName "kube-api-access-8szkm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.692346 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8eed55d6-2272-4d5e-aed9-15d33845a61b" (UID: "8eed55d6-2272-4d5e-aed9-15d33845a61b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.717506 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.717545 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eed55d6-2272-4d5e-aed9-15d33845a61b-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:41:44 crc kubenswrapper[4903]: I1126 23:41:44.717559 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8szkm\" (UniqueName: \"kubernetes.io/projected/8eed55d6-2272-4d5e-aed9-15d33845a61b-kube-api-access-8szkm\") on node \"crc\" DevicePath \"\""
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.093535 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srwkr" event={"ID":"8eed55d6-2272-4d5e-aed9-15d33845a61b","Type":"ContainerDied","Data":"733c3e65adba385f78bbc256873fb1959d1cca018a343f7489487b5fdd515ce1"}
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.093589 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srwkr"
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.094316 4903 scope.go:117] "RemoveContainer" containerID="3ea3115cadc76f3d70932543401dc15b4bef279a449b5252c78c566ff41e6d1c"
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.124115 4903 scope.go:117] "RemoveContainer" containerID="e0faf4e2eb57ea0204e91efb80414849be42268ccbe85d86dded726dfe158487"
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.160390 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-srwkr"]
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.166430 4903 scope.go:117] "RemoveContainer" containerID="5c8b6c2e26245a5267ffcf92c30009d7d8b5ddd7fcd64ab2816136f19c3678d1"
Nov 26 23:41:45 crc kubenswrapper[4903]: I1126 23:41:45.176154 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-srwkr"]
Nov 26 23:41:46 crc kubenswrapper[4903]: I1126 23:41:46.040488 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" path="/var/lib/kubelet/pods/8eed55d6-2272-4d5e-aed9-15d33845a61b/volumes"
Nov 26 23:41:53 crc kubenswrapper[4903]: I1126 23:41:53.029420 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
Nov 26 23:41:53 crc kubenswrapper[4903]: E1126 23:41:53.030740 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:42:07 crc kubenswrapper[4903]: I1126 23:42:07.028954 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930"
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:42:19 crc kubenswrapper[4903]: I1126 23:42:19.029133 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:42:19 crc kubenswrapper[4903]: E1126 23:42:19.030015 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:42:32 crc kubenswrapper[4903]: I1126 23:42:32.040147 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:42:32 crc kubenswrapper[4903]: E1126 23:42:32.040976 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:42:34 crc kubenswrapper[4903]: I1126 23:42:34.354232 4903 trace.go:236] Trace[298938788]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-index-gateway-0" (26-Nov-2025 23:42:33.309) (total time: 1028ms): Nov 26 23:42:34 crc kubenswrapper[4903]: Trace[298938788]: [1.02836087s] [1.02836087s] END Nov 26 23:42:46 crc kubenswrapper[4903]: I1126 23:42:46.029861 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:42:46 crc kubenswrapper[4903]: I1126 23:42:46.839130 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21"} Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.213963 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg"] Nov 26 23:45:00 crc kubenswrapper[4903]: E1126 23:45:00.214986 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="extract-content" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.215004 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="extract-content" Nov 26 23:45:00 crc kubenswrapper[4903]: E1126 23:45:00.215050 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="extract-utilities" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.215100 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="extract-utilities" Nov 26 23:45:00 crc kubenswrapper[4903]: E1126 
23:45:00.215184 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="registry-server" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.215194 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="registry-server" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.215789 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eed55d6-2272-4d5e-aed9-15d33845a61b" containerName="registry-server" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.217502 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.220660 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.221878 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.299574 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.299665 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.299724 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvnps\" (UniqueName: \"kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.309620 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg"] Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.402358 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.402418 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc 
kubenswrapper[4903]: I1126 23:45:00.402441 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvnps\" (UniqueName: \"kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.403453 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.417404 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.420120 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvnps\" (UniqueName: \"kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps\") pod \"collect-profiles-29403345-794mg\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:00 crc kubenswrapper[4903]: I1126 23:45:00.539363 4903 util.go:30] "No sandbox for pod can be found. 
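
The collect-profiles pod mounts three volume types before its sandbox starts: a ConfigMap (config-volume, backed by collect-profiles-config), a Secret (secret-volume), and a projected service-account token (kube-api-access-mvnps). A hedged sketch of how those sources look as corev1.Volume values; the Secret referenced by secret-volume is not named in this excerpt, so the one below is a placeholder:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	vols := []corev1.Volume{
    		{Name: "config-volume", VolumeSource: corev1.VolumeSource{
    			ConfigMap: &corev1.ConfigMapVolumeSource{
    				LocalObjectReference: corev1.LocalObjectReference{
    					Name: "collect-profiles-config",
    				},
    			},
    		}},
    		{Name: "secret-volume", VolumeSource: corev1.VolumeSource{
    			Secret: &corev1.SecretVolumeSource{
    				SecretName: "collect-profiles-cert", // placeholder name
    			},
    		}},
    		// kube-api-access-* is a projected volume combining the token,
    		// CA bundle, and namespace; the API server injects it.
    	}
    	fmt.Printf("%d volumes\n", len(vols))
    }
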
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.032133 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg"] Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.501052 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" event={"ID":"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7","Type":"ContainerStarted","Data":"4046dd2f1cea2ba105714d814603eb71388c0c1b0283b76d0ef9f5472945972f"} Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.501105 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" event={"ID":"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7","Type":"ContainerStarted","Data":"25133aec28cc36a53a0fc185ed2a4be9c747617278b0990fdf8f68b499660c9e"} Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.530626 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" podStartSLOduration=1.530603237 podStartE2EDuration="1.530603237s" podCreationTimestamp="2025-11-26 23:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 23:45:01.514213678 +0000 UTC m=+5030.204448608" watchObservedRunningTime="2025-11-26 23:45:01.530603237 +0000 UTC m=+5030.220838157" Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.981015 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:45:01 crc kubenswrapper[4903]: I1126 23:45:01.981288 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:45:02 crc kubenswrapper[4903]: I1126 23:45:02.521061 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" containerID="4046dd2f1cea2ba105714d814603eb71388c0c1b0283b76d0ef9f5472945972f" exitCode=0 Nov 26 23:45:02 crc kubenswrapper[4903]: I1126 23:45:02.521853 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" event={"ID":"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7","Type":"ContainerDied","Data":"4046dd2f1cea2ba105714d814603eb71388c0c1b0283b76d0ef9f5472945972f"} Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.045745 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.195824 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume\") pod \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.195901 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume\") pod \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.196066 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvnps\" (UniqueName: \"kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps\") pod \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\" (UID: \"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7\") " Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.197635 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume" (OuterVolumeSpecName: "config-volume") pod "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" (UID: "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.198417 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.559999 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" event={"ID":"1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7","Type":"ContainerDied","Data":"25133aec28cc36a53a0fc185ed2a4be9c747617278b0990fdf8f68b499660c9e"} Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.560380 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25133aec28cc36a53a0fc185ed2a4be9c747617278b0990fdf8f68b499660c9e" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.560047 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403345-794mg" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.657297 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4"] Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.682390 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403300-mw9h4"] Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.980937 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps" (OuterVolumeSpecName: "kube-api-access-mvnps") pod "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" (UID: "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7"). InnerVolumeSpecName "kube-api-access-mvnps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:45:04 crc kubenswrapper[4903]: I1126 23:45:04.984925 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" (UID: "1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 23:45:05 crc kubenswrapper[4903]: I1126 23:45:05.031470 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 23:45:05 crc kubenswrapper[4903]: I1126 23:45:05.031507 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvnps\" (UniqueName: \"kubernetes.io/projected/1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7-kube-api-access-mvnps\") on node \"crc\" DevicePath \"\"" Nov 26 23:45:06 crc kubenswrapper[4903]: I1126 23:45:06.049037 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f993922-4feb-4ed0-9748-58ce116e51a5" path="/var/lib/kubelet/pods/6f993922-4feb-4ed0-9748-58ce116e51a5/volumes" Nov 26 23:45:23 crc kubenswrapper[4903]: I1126 23:45:23.777111 4903 scope.go:117] "RemoveContainer" containerID="e0de3ff83f1090fad3af95cb5fd4dad641981156ab0872d14e053be32b82b376" Nov 26 23:45:31 crc kubenswrapper[4903]: I1126 23:45:31.980799 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:45:31 crc kubenswrapper[4903]: I1126 23:45:31.981512 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:46:01 crc kubenswrapper[4903]: I1126 23:46:01.987350 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 23:46:01 crc kubenswrapper[4903]: I1126 23:46:01.990066 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 23:46:01 crc kubenswrapper[4903]: I1126 23:46:01.990324 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 26 23:46:01 crc kubenswrapper[4903]: I1126 23:46:01.992103 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Nov 26 23:46:01 crc kubenswrapper[4903]: I1126 23:46:01.992354 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21" gracePeriod=600 Nov 26 23:46:02 crc kubenswrapper[4903]: I1126 23:46:02.350164 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21" exitCode=0 Nov 26 23:46:02 crc kubenswrapper[4903]: I1126 23:46:02.350245 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21"} Nov 26 23:46:02 crc kubenswrapper[4903]: I1126 23:46:02.350619 4903 scope.go:117] "RemoveContainer" containerID="1ed685a845cbbe0adb0e9e488f19e1c0d6fe1b3fb91fd8914eb3797087cfe930" Nov 26 23:46:03 crc kubenswrapper[4903]: I1126 23:46:03.368636 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"} Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.797236 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"] Nov 26 23:47:34 crc kubenswrapper[4903]: E1126 23:47:34.798415 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" containerName="collect-profiles" Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.798434 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" containerName="collect-profiles" Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.798736 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c7d76ca-d323-4c01-a2ea-697c8d5ee4e7" containerName="collect-profiles" Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.801842 4903 util.go:30] "No sandbox for pod can be found. 
Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.839856 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"]
Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.912252 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skt82\" (UniqueName: \"kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.912532 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:34 crc kubenswrapper[4903]: I1126 23:47:34.912580 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.014561 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skt82\" (UniqueName: \"kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.014830 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.014886 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.015472 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.015532 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.042163 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skt82\" (UniqueName: \"kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82\") pod \"redhat-marketplace-gjj7p\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") " pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:35 crc kubenswrapper[4903]: I1126 23:47:35.133081 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:36 crc kubenswrapper[4903]: I1126 23:47:36.145748 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"]
Nov 26 23:47:36 crc kubenswrapper[4903]: I1126 23:47:36.644331 4903 generic.go:334] "Generic (PLEG): container finished" podID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerID="c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1" exitCode=0
Nov 26 23:47:36 crc kubenswrapper[4903]: I1126 23:47:36.644399 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerDied","Data":"c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1"}
Nov 26 23:47:36 crc kubenswrapper[4903]: I1126 23:47:36.644607 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerStarted","Data":"f8e458596e7a9505e458626d5e65e27de707e2fdf92729677de00d45a920ddd5"}
Nov 26 23:47:36 crc kubenswrapper[4903]: I1126 23:47:36.646570 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 23:47:37 crc kubenswrapper[4903]: I1126 23:47:37.656339 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerStarted","Data":"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"}
Nov 26 23:47:39 crc kubenswrapper[4903]: I1126 23:47:39.686153 4903 generic.go:334] "Generic (PLEG): container finished" podID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerID="a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd" exitCode=0
Nov 26 23:47:39 crc kubenswrapper[4903]: I1126 23:47:39.686205 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerDied","Data":"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"}
Nov 26 23:47:40 crc kubenswrapper[4903]: I1126 23:47:40.699502 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerStarted","Data":"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"}
Nov 26 23:47:40 crc kubenswrapper[4903]: I1126 23:47:40.767716 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gjj7p" podStartSLOduration=3.287709729 podStartE2EDuration="6.767689316s" podCreationTimestamp="2025-11-26 23:47:34 +0000 UTC" firstStartedPulling="2025-11-26 23:47:36.646384059 +0000 UTC m=+5185.336618969" lastFinishedPulling="2025-11-26 23:47:40.126363646 +0000 UTC m=+5188.816598556" observedRunningTime="2025-11-26 23:47:40.763796742 +0000 UTC m=+5189.454031662" watchObservedRunningTime="2025-11-26 23:47:40.767689316 +0000 UTC m=+5189.457924216"
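
The pod_startup_latency_tracker entry above carries a small calculation worth spelling out: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling) from it. A sketch reproducing the entry's numbers, on the assumption that this is how the tracker relates its fields:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Timestamps copied from the log entry above (monotonic m=+ offsets dropped).
	created := parse("2025-11-26 23:47:34 +0000 UTC")
	firstPull := parse("2025-11-26 23:47:36.646384059 +0000 UTC")
	lastPull := parse("2025-11-26 23:47:40.126363646 +0000 UTC")
	running := parse("2025-11-26 23:47:40.767689316 +0000 UTC")

	e2e := running.Sub(created)          // 6.767689316s = podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 3.287709729s = podStartSLOduration
	fmt.Println("E2E:", e2e, "SLO:", slo)
}

Running it prints E2E: 6.767689316s SLO: 3.287709729s, matching the logged values.
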
Nov 26 23:47:45 crc kubenswrapper[4903]: I1126 23:47:45.133435 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:45 crc kubenswrapper[4903]: I1126 23:47:45.134045 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:45 crc kubenswrapper[4903]: I1126 23:47:45.187827 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:45 crc kubenswrapper[4903]: I1126 23:47:45.845889 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:45 crc kubenswrapper[4903]: I1126 23:47:45.914316 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"]
Nov 26 23:47:47 crc kubenswrapper[4903]: I1126 23:47:47.794886 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gjj7p" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="registry-server" containerID="cri-o://6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67" gracePeriod=2
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.471632 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gjj7p"
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.565570 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skt82\" (UniqueName: \"kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82\") pod \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") "
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.565626 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content\") pod \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") "
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.565709 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities\") pod \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\" (UID: \"087f719c-fd21-41ea-ae9f-2ac1c24e3933\") "
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.568834 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities" (OuterVolumeSpecName: "utilities") pod "087f719c-fd21-41ea-ae9f-2ac1c24e3933" (UID: "087f719c-fd21-41ea-ae9f-2ac1c24e3933"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.585086 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82" (OuterVolumeSpecName: "kube-api-access-skt82") pod "087f719c-fd21-41ea-ae9f-2ac1c24e3933" (UID: "087f719c-fd21-41ea-ae9f-2ac1c24e3933"). InnerVolumeSpecName "kube-api-access-skt82". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.591799 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "087f719c-fd21-41ea-ae9f-2ac1c24e3933" (UID: "087f719c-fd21-41ea-ae9f-2ac1c24e3933"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.668632 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skt82\" (UniqueName: \"kubernetes.io/projected/087f719c-fd21-41ea-ae9f-2ac1c24e3933-kube-api-access-skt82\") on node \"crc\" DevicePath \"\""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.668666 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.668676 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/087f719c-fd21-41ea-ae9f-2ac1c24e3933-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.810359 4903 generic.go:334] "Generic (PLEG): container finished" podID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerID="6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67" exitCode=0
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.810403 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerDied","Data":"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"}
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.810433 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gjj7p" event={"ID":"087f719c-fd21-41ea-ae9f-2ac1c24e3933","Type":"ContainerDied","Data":"f8e458596e7a9505e458626d5e65e27de707e2fdf92729677de00d45a920ddd5"}
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.810450 4903 scope.go:117] "RemoveContainer" containerID="6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.810532 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gjj7p"
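
The kill entries above use very different grace periods: 600s for the machine-config-daemon and 2s for the registry-server here. In both cases the semantics are a polite stop first, then a hard kill only if the container outlives the grace period. A self-contained Go sketch of that pattern; the raw signal handling is illustrative, since the kubelet delegates the actual kill to the CRI runtime rather than signalling directly:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mirrors the two-step semantics: SIGTERM first, and a hard
// kill only if the process is still alive when the grace period lapses.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL once the grace period is exhausted
		<-done
		fmt.Println("killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "30")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// sleep dies on SIGTERM, so this normally takes the first branch;
	// gracePeriod=2 matches the registry-server kill above.
	killWithGrace(cmd, 2*time.Second)
}
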
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.875132 4903 scope.go:117] "RemoveContainer" containerID="a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.907742 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"]
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.928748 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gjj7p"]
Nov 26 23:47:48 crc kubenswrapper[4903]: I1126 23:47:48.980062 4903 scope.go:117] "RemoveContainer" containerID="c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.027942 4903 scope.go:117] "RemoveContainer" containerID="6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"
Nov 26 23:47:49 crc kubenswrapper[4903]: E1126 23:47:49.028340 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67\": container with ID starting with 6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67 not found: ID does not exist" containerID="6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.028382 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67"} err="failed to get container status \"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67\": rpc error: code = NotFound desc = could not find container \"6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67\": container with ID starting with 6dd4aee65de79dce53d0186a78ebb437f88c057c8cb8bb96d8956b69be30ef67 not found: ID does not exist"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.028410 4903 scope.go:117] "RemoveContainer" containerID="a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"
Nov 26 23:47:49 crc kubenswrapper[4903]: E1126 23:47:49.031994 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd\": container with ID starting with a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd not found: ID does not exist" containerID="a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.032034 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd"} err="failed to get container status \"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd\": rpc error: code = NotFound desc = could not find container \"a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd\": container with ID starting with a9644ee458c2d09c376b4257d1099d2be493962a700c5f046253ba85ec9b2fcd not found: ID does not exist"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.032060 4903 scope.go:117] "RemoveContainer" containerID="c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1"
Nov 26 23:47:49 crc kubenswrapper[4903]: E1126 23:47:49.032307 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1\": container with ID starting with c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1 not found: ID does not exist" containerID="c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1"
Nov 26 23:47:49 crc kubenswrapper[4903]: I1126 23:47:49.032399 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1"} err="failed to get container status \"c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1\": rpc error: code = NotFound desc = could not find container \"c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1\": container with ID starting with c98ba747be1172f7fd1eab8b52300ee7d9a8ee006c8e6fc1ec1a4fa8261015b1 not found: ID does not exist"
Nov 26 23:47:50 crc kubenswrapper[4903]: I1126 23:47:50.089925 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" path="/var/lib/kubelet/pods/087f719c-fd21-41ea-ae9f-2ac1c24e3933/volumes"
Nov 26 23:48:31 crc kubenswrapper[4903]: I1126 23:48:31.981332 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:48:31 crc kubenswrapper[4903]: I1126 23:48:31.982230 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:49:01 crc kubenswrapper[4903]: I1126 23:49:01.981471 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:49:01 crc kubenswrapper[4903]: I1126 23:49:01.982089 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.605041 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:25 crc kubenswrapper[4903]: E1126 23:49:25.606950 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="extract-content"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.606981 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="extract-content"
Nov 26 23:49:25 crc kubenswrapper[4903]: E1126 23:49:25.607112 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="registry-server"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.607129 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="registry-server"
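
The alternating "ContainerStatus from runtime service failed" / "DeleteContainer returned error" pairs above are the benign tail of cleanup: the containers were already removed moments earlier, so the runtime answers NotFound and the deletor logs the error and moves on. A sketch of that idempotent-delete pattern with gRPC status codes; removeContainer is a hypothetical stand-in for the CRI call, not the kubelet's function:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer stands in for the CRI RemoveContainer RPC; here it always
// answers NotFound, like the runtime responses in the log above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := removeContainer("6dd4aee65de7")
	if status.Code(err) == codes.NotFound {
		// Already gone: report it, as pod_container_deletor does, and move on.
		fmt.Println("already removed:", err)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("removed")
}
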
Nov 26 23:49:25 crc kubenswrapper[4903]: E1126 23:49:25.607174 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="extract-utilities"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.607191 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="extract-utilities"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.607821 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="087f719c-fd21-41ea-ae9f-2ac1c24e3933" containerName="registry-server"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.611449 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.636929 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.646494 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.646587 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.646797 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmg7b\" (UniqueName: \"kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.749384 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.749878 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.749969 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.750226 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmg7b\" (UniqueName: \"kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:25 crc kubenswrapper[4903]: I1126 23:49:25.750428 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:26 crc kubenswrapper[4903]: I1126 23:49:26.481263 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmg7b\" (UniqueName: \"kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b\") pod \"redhat-operators-6v8tx\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") " pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:26 crc kubenswrapper[4903]: I1126 23:49:26.536107 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:27 crc kubenswrapper[4903]: I1126 23:49:27.120809 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:27 crc kubenswrapper[4903]: I1126 23:49:27.411928 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerStarted","Data":"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"}
Nov 26 23:49:27 crc kubenswrapper[4903]: I1126 23:49:27.412269 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerStarted","Data":"8826f845e7cf3719d528ab88a4b0a1dcd9bece019deb6d70fe9b3b98f5838e04"}
Nov 26 23:49:28 crc kubenswrapper[4903]: I1126 23:49:28.432245 4903 generic.go:334] "Generic (PLEG): container finished" podID="4632b406-3040-40a9-a78b-3e5990d42264" containerID="410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62" exitCode=0
Nov 26 23:49:28 crc kubenswrapper[4903]: I1126 23:49:28.432334 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerDied","Data":"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"}
Nov 26 23:49:29 crc kubenswrapper[4903]: I1126 23:49:29.452193 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerStarted","Data":"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"}
Nov 26 23:49:31 crc kubenswrapper[4903]: I1126 23:49:31.981512 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:49:31 crc kubenswrapper[4903]: I1126 23:49:31.982203 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:49:31 crc kubenswrapper[4903]: I1126 23:49:31.982256 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 23:49:31 crc kubenswrapper[4903]: I1126 23:49:31.983309 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 23:49:31 crc kubenswrapper[4903]: I1126 23:49:31.983368 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" gracePeriod=600
Nov 26 23:49:33 crc kubenswrapper[4903]: I1126 23:49:33.514369 4903 generic.go:334] "Generic (PLEG): container finished" podID="4632b406-3040-40a9-a78b-3e5990d42264" containerID="ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20" exitCode=0
Nov 26 23:49:33 crc kubenswrapper[4903]: I1126 23:49:33.514485 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerDied","Data":"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"}
Nov 26 23:49:33 crc kubenswrapper[4903]: I1126 23:49:33.520035 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" exitCode=0
Nov 26 23:49:33 crc kubenswrapper[4903]: I1126 23:49:33.520128 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"}
Nov 26 23:49:33 crc kubenswrapper[4903]: I1126 23:49:33.520196 4903 scope.go:117] "RemoveContainer" containerID="f1d32d64bc9a53cf86e63de331b7164e7ef8e8d60eb2f6fec19ce11a2354de21"
Nov 26 23:49:33 crc kubenswrapper[4903]: E1126 23:49:33.936123 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:49:34 crc kubenswrapper[4903]: I1126 23:49:34.535831 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:49:34 crc kubenswrapper[4903]: E1126 23:49:34.536733 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:49:34 crc kubenswrapper[4903]: I1126 23:49:34.540252 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerStarted","Data":"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"}
Nov 26 23:49:34 crc kubenswrapper[4903]: I1126 23:49:34.608604 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6v8tx" podStartSLOduration=4.058647317 podStartE2EDuration="9.608568449s" podCreationTimestamp="2025-11-26 23:49:25 +0000 UTC" firstStartedPulling="2025-11-26 23:49:28.435973274 +0000 UTC m=+5297.126208224" lastFinishedPulling="2025-11-26 23:49:33.985894406 +0000 UTC m=+5302.676129356" observedRunningTime="2025-11-26 23:49:34.602783555 +0000 UTC m=+5303.293018505" watchObservedRunningTime="2025-11-26 23:49:34.608568449 +0000 UTC m=+5303.298803409"
Nov 26 23:49:36 crc kubenswrapper[4903]: I1126 23:49:36.536920 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:36 crc kubenswrapper[4903]: I1126 23:49:36.537284 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:37 crc kubenswrapper[4903]: I1126 23:49:37.596719 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6v8tx" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="registry-server" probeResult="failure" output=<
Nov 26 23:49:37 crc kubenswrapper[4903]: 	timeout: failed to connect service ":50051" within 1s
Nov 26 23:49:37 crc kubenswrapper[4903]: >
Nov 26 23:49:46 crc kubenswrapper[4903]: I1126 23:49:46.594226 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:46 crc kubenswrapper[4903]: I1126 23:49:46.650195 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:46 crc kubenswrapper[4903]: I1126 23:49:46.840756 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:47 crc kubenswrapper[4903]: I1126 23:49:47.711455 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6v8tx" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="registry-server" containerID="cri-o://be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e" gracePeriod=2
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.029514 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:49:48 crc kubenswrapper[4903]: E1126 23:49:48.030073 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.318860 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.404665 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content\") pod \"4632b406-3040-40a9-a78b-3e5990d42264\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") "
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.404922 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmg7b\" (UniqueName: \"kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b\") pod \"4632b406-3040-40a9-a78b-3e5990d42264\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") "
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.405022 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities\") pod \"4632b406-3040-40a9-a78b-3e5990d42264\" (UID: \"4632b406-3040-40a9-a78b-3e5990d42264\") "
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.405973 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities" (OuterVolumeSpecName: "utilities") pod "4632b406-3040-40a9-a78b-3e5990d42264" (UID: "4632b406-3040-40a9-a78b-3e5990d42264"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.413167 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b" (OuterVolumeSpecName: "kube-api-access-fmg7b") pod "4632b406-3040-40a9-a78b-3e5990d42264" (UID: "4632b406-3040-40a9-a78b-3e5990d42264"). InnerVolumeSpecName "kube-api-access-fmg7b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.500000 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4632b406-3040-40a9-a78b-3e5990d42264" (UID: "4632b406-3040-40a9-a78b-3e5990d42264"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.507825 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.508202 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmg7b\" (UniqueName: \"kubernetes.io/projected/4632b406-3040-40a9-a78b-3e5990d42264-kube-api-access-fmg7b\") on node \"crc\" DevicePath \"\""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.508222 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4632b406-3040-40a9-a78b-3e5990d42264-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.734459 4903 generic.go:334] "Generic (PLEG): container finished" podID="4632b406-3040-40a9-a78b-3e5990d42264" containerID="be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e" exitCode=0
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.734511 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerDied","Data":"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"}
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.734549 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6v8tx" event={"ID":"4632b406-3040-40a9-a78b-3e5990d42264","Type":"ContainerDied","Data":"8826f845e7cf3719d528ab88a4b0a1dcd9bece019deb6d70fe9b3b98f5838e04"}
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.734553 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6v8tx"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.734577 4903 scope.go:117] "RemoveContainer" containerID="be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.761613 4903 scope.go:117] "RemoveContainer" containerID="ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.786389 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.797369 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6v8tx"]
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.803898 4903 scope.go:117] "RemoveContainer" containerID="410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.855534 4903 scope.go:117] "RemoveContainer" containerID="be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"
Nov 26 23:49:48 crc kubenswrapper[4903]: E1126 23:49:48.856039 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e\": container with ID starting with be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e not found: ID does not exist" containerID="be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.856091 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e"} err="failed to get container status \"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e\": rpc error: code = NotFound desc = could not find container \"be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e\": container with ID starting with be838df3bb369bbca778aee824d300b4f79a797a06f1638b3f3d219b3205df8e not found: ID does not exist"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.856123 4903 scope.go:117] "RemoveContainer" containerID="ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"
Nov 26 23:49:48 crc kubenswrapper[4903]: E1126 23:49:48.856614 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20\": container with ID starting with ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20 not found: ID does not exist" containerID="ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.856647 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20"} err="failed to get container status \"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20\": rpc error: code = NotFound desc = could not find container \"ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20\": container with ID starting with ac6c7a62f1575bf104fb0718ea826178f93c1727dfd159cf2361b2186e5d6f20 not found: ID does not exist"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.856665 4903 scope.go:117] "RemoveContainer" containerID="410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"
Nov 26 23:49:48 crc kubenswrapper[4903]: E1126 23:49:48.857039 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62\": container with ID starting with 410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62 not found: ID does not exist" containerID="410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"
Nov 26 23:49:48 crc kubenswrapper[4903]: I1126 23:49:48.857131 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62"} err="failed to get container status \"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62\": rpc error: code = NotFound desc = could not find container \"410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62\": container with ID starting with 410932dd3d0add1329ce9bc8fc33edc16f88e743a5f5e318fc022f6e7c13ce62 not found: ID does not exist"
Nov 26 23:49:50 crc kubenswrapper[4903]: I1126 23:49:50.042590 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4632b406-3040-40a9-a78b-3e5990d42264" path="/var/lib/kubelet/pods/4632b406-3040-40a9-a78b-3e5990d42264/volumes"
Nov 26 23:50:03 crc kubenswrapper[4903]: I1126 23:50:03.028453 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:50:03 crc kubenswrapper[4903]: E1126 23:50:03.029354 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:50:17 crc kubenswrapper[4903]: I1126 23:50:17.029241 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:50:17 crc kubenswrapper[4903]: E1126 23:50:17.030371 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:50:29 crc kubenswrapper[4903]: I1126 23:50:29.029169 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:50:29 crc kubenswrapper[4903]: E1126 23:50:29.030215 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.030400 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
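
Every CrashLoopBackOff error above cites the same capped delay, "back-off 5m0s". The kubelet doubles the restart delay after each failed restart up to a five-minute ceiling; a sketch of that schedule, where the 5m cap comes straight from the log and the 10s initial delay is an assumed default:

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxDelay = 5 * time.Minute // "back-off 5m0s" in the errors above
	delay := 10 * time.Second        // assumed initial delay, not taken from this log
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart attempt %d: wait %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // from here on every retry reports the full 5m0s
		}
	}
}
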
Nov 26 23:50:40 crc kubenswrapper[4903]: E1126 23:50:40.031666 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.085573 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:40 crc kubenswrapper[4903]: E1126 23:50:40.086969 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="extract-utilities"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.087008 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="extract-utilities"
Nov 26 23:50:40 crc kubenswrapper[4903]: E1126 23:50:40.087086 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="extract-content"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.087107 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="extract-content"
Nov 26 23:50:40 crc kubenswrapper[4903]: E1126 23:50:40.087164 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="registry-server"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.087183 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="registry-server"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.087841 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4632b406-3040-40a9-a78b-3e5990d42264" containerName="registry-server"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.091767 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.104560 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.157365 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.159048 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb7jd\" (UniqueName: \"kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.159440 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.261727 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb7jd\" (UniqueName: \"kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.261833 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.261944 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.262430 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.262506 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.588601 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb7jd\" (UniqueName: \"kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd\") pod \"certified-operators-c8787\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") " pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:40 crc kubenswrapper[4903]: I1126 23:50:40.726410 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:41 crc kubenswrapper[4903]: I1126 23:50:41.258330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:41 crc kubenswrapper[4903]: I1126 23:50:41.496918 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerStarted","Data":"e41cf2acec4ef7221b426969a84ebdb3f5cf2ea4146511c17dbb2c481dfcdb0d"}
Nov 26 23:50:42 crc kubenswrapper[4903]: I1126 23:50:42.507241 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerID="327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e" exitCode=0
Nov 26 23:50:42 crc kubenswrapper[4903]: I1126 23:50:42.507343 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerDied","Data":"327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e"}
Nov 26 23:50:44 crc kubenswrapper[4903]: I1126 23:50:44.542754 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerStarted","Data":"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"}
Nov 26 23:50:44 crc kubenswrapper[4903]: E1126 23:50:44.889266 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b15a6d5_2bfd_41cd_9178_f35011bc663f.slice/crio-conmon-dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b15a6d5_2bfd_41cd_9178_f35011bc663f.slice/crio-dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 23:50:45 crc kubenswrapper[4903]: I1126 23:50:45.562061 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerID="dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8" exitCode=0
Nov 26 23:50:45 crc kubenswrapper[4903]: I1126 23:50:45.562114 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerDied","Data":"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"}
Nov 26 23:50:46 crc kubenswrapper[4903]: I1126 23:50:46.574806 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerStarted","Data":"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"}
Nov 26 23:50:46 crc kubenswrapper[4903]: I1126 23:50:46.601357 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c8787" podStartSLOduration=3.00786027 podStartE2EDuration="6.601338545s" podCreationTimestamp="2025-11-26 23:50:40 +0000 UTC" firstStartedPulling="2025-11-26 23:50:42.509417879 +0000 UTC m=+5371.199652789" lastFinishedPulling="2025-11-26 23:50:46.102896154 +0000 UTC m=+5374.793131064" observedRunningTime="2025-11-26 23:50:46.590592157 +0000 UTC m=+5375.280827067" watchObservedRunningTime="2025-11-26 23:50:46.601338545 +0000 UTC m=+5375.291573455"
Nov 26 23:50:50 crc kubenswrapper[4903]: I1126 23:50:50.727102 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:50 crc kubenswrapper[4903]: I1126 23:50:50.729064 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:50 crc kubenswrapper[4903]: I1126 23:50:50.777988 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:51 crc kubenswrapper[4903]: I1126 23:50:51.694461 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:51 crc kubenswrapper[4903]: I1126 23:50:51.763076 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:52 crc kubenswrapper[4903]: I1126 23:50:52.036801 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:50:52 crc kubenswrapper[4903]: E1126 23:50:52.037086 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:50:53 crc kubenswrapper[4903]: I1126 23:50:53.662092 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c8787" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="registry-server" containerID="cri-o://6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91" gracePeriod=2
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.229268 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.259659 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb7jd\" (UniqueName: \"kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd\") pod \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") "
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.259737 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content\") pod \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") "
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.259817 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities\") pod \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\" (UID: \"4b15a6d5-2bfd-41cd-9178-f35011bc663f\") "
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.261235 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities" (OuterVolumeSpecName: "utilities") pod "4b15a6d5-2bfd-41cd-9178-f35011bc663f" (UID: "4b15a6d5-2bfd-41cd-9178-f35011bc663f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.265970 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd" (OuterVolumeSpecName: "kube-api-access-cb7jd") pod "4b15a6d5-2bfd-41cd-9178-f35011bc663f" (UID: "4b15a6d5-2bfd-41cd-9178-f35011bc663f"). InnerVolumeSpecName "kube-api-access-cb7jd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.363553 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb7jd\" (UniqueName: \"kubernetes.io/projected/4b15a6d5-2bfd-41cd-9178-f35011bc663f-kube-api-access-cb7jd\") on node \"crc\" DevicePath \"\""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.363587 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.512185 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b15a6d5-2bfd-41cd-9178-f35011bc663f" (UID: "4b15a6d5-2bfd-41cd-9178-f35011bc663f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.569084 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b15a6d5-2bfd-41cd-9178-f35011bc663f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.676490 4903 generic.go:334] "Generic (PLEG): container finished" podID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerID="6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91" exitCode=0
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.676554 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerDied","Data":"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"}
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.676575 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8787"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.676594 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8787" event={"ID":"4b15a6d5-2bfd-41cd-9178-f35011bc663f","Type":"ContainerDied","Data":"e41cf2acec4ef7221b426969a84ebdb3f5cf2ea4146511c17dbb2c481dfcdb0d"}
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.676626 4903 scope.go:117] "RemoveContainer" containerID="6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.704972 4903 scope.go:117] "RemoveContainer" containerID="dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.733376 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.744174 4903 scope.go:117] "RemoveContainer" containerID="327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.746420 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c8787"]
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.819353 4903 scope.go:117] "RemoveContainer" containerID="6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"
Nov 26 23:50:54 crc kubenswrapper[4903]: E1126 23:50:54.820044 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91\": container with ID starting with 6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91 not found: ID does not exist" containerID="6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.820106 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91"} err="failed to get container status \"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91\": rpc error: code = NotFound desc = could not find container \"6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91\": container with ID starting with 6389b694523f3610db23a7d9eb927cdb846dea850e2da3598cc34c48901dab91 not found: ID does not exist"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.820147 4903 scope.go:117] "RemoveContainer" containerID="dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"
Nov 26 23:50:54 crc kubenswrapper[4903]: E1126 23:50:54.820580 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8\": container with ID starting with dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8 not found: ID does not exist" containerID="dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.820632 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8"} err="failed to get container status \"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8\": rpc error: code = NotFound desc = could not find container \"dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8\": container with ID starting with dd4cf27d617135c1fec1514c4d670259e0d857014ce76addecb4b1393c0fc1a8 not found: ID does not exist"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.820661 4903 scope.go:117] "RemoveContainer" containerID="327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e"
Nov 26 23:50:54 crc kubenswrapper[4903]: E1126 23:50:54.821170 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e\": container with ID starting with 327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e not found: ID does not exist" containerID="327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e"
Nov 26 23:50:54 crc kubenswrapper[4903]: I1126 23:50:54.821206 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e"} err="failed to get container status \"327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e\": rpc error: code = NotFound desc = could not find container \"327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e\": container with ID starting with 327b33d8ad863a39109a7d86a8866e845fb855b232f95a7943259dde1601636e not found: ID does not exist"
Nov 26 23:50:56 crc kubenswrapper[4903]: I1126 23:50:56.045317 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" path="/var/lib/kubelet/pods/4b15a6d5-2bfd-41cd-9178-f35011bc663f/volumes"
Nov 26 23:51:06 crc kubenswrapper[4903]: I1126 23:51:06.028620 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:51:06 crc kubenswrapper[4903]: E1126 23:51:06.029437 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 26 23:51:21 crc kubenswrapper[4903]: I1126 23:51:21.029033 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:51:21 crc kubenswrapper[4903]: E1126 23:51:21.030404 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:51:33 crc kubenswrapper[4903]: I1126 23:51:33.028133 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:51:33 crc kubenswrapper[4903]: E1126 23:51:33.029194 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:51:44 crc kubenswrapper[4903]: I1126 23:51:44.028924 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:51:44 crc kubenswrapper[4903]: E1126 23:51:44.029732 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:51:59 crc kubenswrapper[4903]: I1126 23:51:59.028840 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:51:59 crc kubenswrapper[4903]: E1126 23:51:59.029489 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:52:10 crc kubenswrapper[4903]: I1126 23:52:10.029418 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:52:10 crc kubenswrapper[4903]: E1126 23:52:10.030415 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:52:25 crc kubenswrapper[4903]: I1126 23:52:25.028743 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:52:25 crc kubenswrapper[4903]: E1126 23:52:25.029611 4903 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:52:38 crc kubenswrapper[4903]: I1126 23:52:38.029270 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:52:38 crc kubenswrapper[4903]: E1126 23:52:38.030944 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:52:49 crc kubenswrapper[4903]: I1126 23:52:49.030534 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:52:49 crc kubenswrapper[4903]: E1126 23:52:49.031587 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:53:00 crc kubenswrapper[4903]: I1126 23:53:00.028378 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:53:00 crc kubenswrapper[4903]: E1126 23:53:00.029090 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:53:13 crc kubenswrapper[4903]: I1126 23:53:13.029147 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:53:13 crc kubenswrapper[4903]: E1126 23:53:13.031253 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:53:25 crc kubenswrapper[4903]: I1126 23:53:25.028435 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:53:25 crc kubenswrapper[4903]: E1126 23:53:25.029381 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:53:36 crc kubenswrapper[4903]: I1126 23:53:36.029788 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:53:36 crc kubenswrapper[4903]: E1126 23:53:36.030971 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:53:49 crc kubenswrapper[4903]: I1126 23:53:49.029587 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:53:49 crc kubenswrapper[4903]: E1126 23:53:49.030352 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:54:01 crc kubenswrapper[4903]: I1126 23:54:01.028368 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:54:01 crc kubenswrapper[4903]: E1126 23:54:01.030475 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.442160 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v7cgs"] Nov 26 23:54:07 crc kubenswrapper[4903]: E1126 23:54:07.444169 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="extract-content" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.444209 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="extract-content" Nov 26 23:54:07 crc kubenswrapper[4903]: E1126 23:54:07.444310 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="registry-server" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.444333 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="registry-server" Nov 26 23:54:07 crc kubenswrapper[4903]: E1126 23:54:07.444393 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="extract-utilities" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.444414 4903 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="extract-utilities" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.445249 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b15a6d5-2bfd-41cd-9178-f35011bc663f" containerName="registry-server" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.450627 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.460399 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v7cgs"] Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.605451 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6gg5\" (UniqueName: \"kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.605514 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.606043 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.709374 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.709594 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6gg5\" (UniqueName: \"kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.709637 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.709967 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.710022 4903 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.729551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6gg5\" (UniqueName: \"kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5\") pod \"community-operators-v7cgs\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") " pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:07 crc kubenswrapper[4903]: I1126 23:54:07.786494 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:08 crc kubenswrapper[4903]: I1126 23:54:08.326313 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v7cgs"] Nov 26 23:54:08 crc kubenswrapper[4903]: I1126 23:54:08.472475 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerStarted","Data":"4fb9a8eba7c158d4970d17ac31f486e03fd2202b459c31e49049d4981ba276f9"} Nov 26 23:54:09 crc kubenswrapper[4903]: I1126 23:54:09.494023 4903 generic.go:334] "Generic (PLEG): container finished" podID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerID="1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9" exitCode=0 Nov 26 23:54:09 crc kubenswrapper[4903]: I1126 23:54:09.494147 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerDied","Data":"1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9"} Nov 26 23:54:09 crc kubenswrapper[4903]: I1126 23:54:09.497509 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:54:11 crc kubenswrapper[4903]: I1126 23:54:11.528526 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerStarted","Data":"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"} Nov 26 23:54:12 crc kubenswrapper[4903]: I1126 23:54:12.540909 4903 generic.go:334] "Generic (PLEG): container finished" podID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerID="2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc" exitCode=0 Nov 26 23:54:12 crc kubenswrapper[4903]: I1126 23:54:12.541084 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerDied","Data":"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"} Nov 26 23:54:13 crc kubenswrapper[4903]: I1126 23:54:13.561130 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerStarted","Data":"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"} Nov 26 23:54:13 crc kubenswrapper[4903]: I1126 23:54:13.590203 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-v7cgs" podStartSLOduration=2.824357811 podStartE2EDuration="6.590180248s" podCreationTimestamp="2025-11-26 23:54:07 +0000 UTC" firstStartedPulling="2025-11-26 23:54:09.497245657 +0000 UTC m=+5578.187480577" lastFinishedPulling="2025-11-26 23:54:13.263068064 +0000 UTC m=+5581.953303014" observedRunningTime="2025-11-26 23:54:13.579926473 +0000 UTC m=+5582.270161423" watchObservedRunningTime="2025-11-26 23:54:13.590180248 +0000 UTC m=+5582.280415158" Nov 26 23:54:15 crc kubenswrapper[4903]: I1126 23:54:15.028913 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:54:15 crc kubenswrapper[4903]: E1126 23:54:15.029929 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:54:17 crc kubenswrapper[4903]: I1126 23:54:17.787596 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:17 crc kubenswrapper[4903]: I1126 23:54:17.788176 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:18 crc kubenswrapper[4903]: I1126 23:54:18.864726 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-v7cgs" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="registry-server" probeResult="failure" output=< Nov 26 23:54:18 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:54:18 crc kubenswrapper[4903]: > Nov 26 23:54:27 crc kubenswrapper[4903]: I1126 23:54:27.859479 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:27 crc kubenswrapper[4903]: I1126 23:54:27.929021 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v7cgs" Nov 26 23:54:28 crc kubenswrapper[4903]: I1126 23:54:28.106864 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v7cgs"] Nov 26 23:54:29 crc kubenswrapper[4903]: I1126 23:54:29.029702 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084" Nov 26 23:54:29 crc kubenswrapper[4903]: E1126 23:54:29.030279 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 26 23:54:29 crc kubenswrapper[4903]: I1126 23:54:29.809389 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v7cgs" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="registry-server" containerID="cri-o://2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be" 
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.424262 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7cgs"
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.576947 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content\") pod \"625bbce3-47da-4cdc-9c42-59135e2ff822\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") "
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.577005 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6gg5\" (UniqueName: \"kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5\") pod \"625bbce3-47da-4cdc-9c42-59135e2ff822\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") "
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.577088 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities\") pod \"625bbce3-47da-4cdc-9c42-59135e2ff822\" (UID: \"625bbce3-47da-4cdc-9c42-59135e2ff822\") "
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.578428 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities" (OuterVolumeSpecName: "utilities") pod "625bbce3-47da-4cdc-9c42-59135e2ff822" (UID: "625bbce3-47da-4cdc-9c42-59135e2ff822"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.583548 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5" (OuterVolumeSpecName: "kube-api-access-k6gg5") pod "625bbce3-47da-4cdc-9c42-59135e2ff822" (UID: "625bbce3-47da-4cdc-9c42-59135e2ff822"). InnerVolumeSpecName "kube-api-access-k6gg5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.652214 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "625bbce3-47da-4cdc-9c42-59135e2ff822" (UID: "625bbce3-47da-4cdc-9c42-59135e2ff822"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.680937 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.681058 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6gg5\" (UniqueName: \"kubernetes.io/projected/625bbce3-47da-4cdc-9c42-59135e2ff822-kube-api-access-k6gg5\") on node \"crc\" DevicePath \"\""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.681089 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/625bbce3-47da-4cdc-9c42-59135e2ff822-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.852547 4903 generic.go:334] "Generic (PLEG): container finished" podID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerID="2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be" exitCode=0
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.852628 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerDied","Data":"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"}
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.853049 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v7cgs" event={"ID":"625bbce3-47da-4cdc-9c42-59135e2ff822","Type":"ContainerDied","Data":"4fb9a8eba7c158d4970d17ac31f486e03fd2202b459c31e49049d4981ba276f9"}
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.852670 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v7cgs"
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.853124 4903 scope.go:117] "RemoveContainer" containerID="2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.909970 4903 scope.go:117] "RemoveContainer" containerID="2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.913569 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v7cgs"]
Nov 26 23:54:30 crc kubenswrapper[4903]: I1126 23:54:30.928126 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v7cgs"]
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.604033 4903 scope.go:117] "RemoveContainer" containerID="1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.705819 4903 scope.go:117] "RemoveContainer" containerID="2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"
Nov 26 23:54:31 crc kubenswrapper[4903]: E1126 23:54:31.706627 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be\": container with ID starting with 2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be not found: ID does not exist" containerID="2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.706669 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be"} err="failed to get container status \"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be\": rpc error: code = NotFound desc = could not find container \"2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be\": container with ID starting with 2bbec6a47b76b4e8ac7513ec97d10552b2eabbbe2622132eb24179cded8d01be not found: ID does not exist"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.706710 4903 scope.go:117] "RemoveContainer" containerID="2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"
Nov 26 23:54:31 crc kubenswrapper[4903]: E1126 23:54:31.707084 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc\": container with ID starting with 2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc not found: ID does not exist" containerID="2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.707111 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc"} err="failed to get container status \"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc\": rpc error: code = NotFound desc = could not find container \"2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc\": container with ID starting with 2eea7de7c2661daa8baeac8aced09612aa3137feb17edd24961b4b58bd4c6ebc not found: ID does not exist"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.707128 4903 scope.go:117] "RemoveContainer" containerID="1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9"
Nov 26 23:54:31 crc kubenswrapper[4903]: E1126 23:54:31.707383 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9\": container with ID starting with 1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9 not found: ID does not exist" containerID="1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9"
Nov 26 23:54:31 crc kubenswrapper[4903]: I1126 23:54:31.707413 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9"} err="failed to get container status \"1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9\": rpc error: code = NotFound desc = could not find container \"1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9\": container with ID starting with 1cf1c6c30aed33590d6182eee667ae81f7665fb76521226500026d6ac94821c9 not found: ID does not exist"
Nov 26 23:54:32 crc kubenswrapper[4903]: I1126 23:54:32.041529 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" path="/var/lib/kubelet/pods/625bbce3-47da-4cdc-9c42-59135e2ff822/volumes"
Nov 26 23:54:43 crc kubenswrapper[4903]: I1126 23:54:43.028334 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:54:44 crc kubenswrapper[4903]: I1126 23:54:44.064412 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725"}
Nov 26 23:57:01 crc kubenswrapper[4903]: I1126 23:57:01.980812 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:57:01 crc kubenswrapper[4903]: I1126 23:57:01.981475 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:57:31 crc kubenswrapper[4903]: I1126 23:57:31.981870 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:57:31 crc kubenswrapper[4903]: I1126 23:57:31.982683 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:58:01 crc kubenswrapper[4903]: I1126 23:58:01.981265 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 23:58:01 crc kubenswrapper[4903]: I1126 23:58:01.982331 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 23:58:01 crc kubenswrapper[4903]: I1126 23:58:01.982462 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph"
Nov 26 23:58:01 crc kubenswrapper[4903]: I1126 23:58:01.984296 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 23:58:01 crc kubenswrapper[4903]: I1126 23:58:01.984400 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725" gracePeriod=600
Nov 26 23:58:02 crc kubenswrapper[4903]: I1126 23:58:02.949720 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725" exitCode=0
Nov 26 23:58:02 crc kubenswrapper[4903]: I1126 23:58:02.949791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725"}
Nov 26 23:58:02 crc kubenswrapper[4903]: I1126 23:58:02.950445 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"}
Nov 26 23:58:02 crc kubenswrapper[4903]: I1126 23:58:02.950467 4903 scope.go:117] "RemoveContainer" containerID="8f85e17c5f290187948c26098cd0c6d45f4670cf8e62be70b2987f8b1cf67084"
Nov 26 23:58:09 crc kubenswrapper[4903]: I1126 23:58:09.045808 4903 generic.go:334] "Generic (PLEG): container finished" podID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" containerID="68e2d88354572b9806a86031cc200cba2198e8427bb0206fbbf738a7eef73da7" exitCode=0
Nov 26 23:58:09 crc kubenswrapper[4903]: I1126 23:58:09.045849 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"47c0a41f-61f3-4e6c-8367-a25c5a75d02b","Type":"ContainerDied","Data":"68e2d88354572b9806a86031cc200cba2198e8427bb0206fbbf738a7eef73da7"}
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.544137 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.601817 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.601932 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d4b7\" (UniqueName: \"kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.601989 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602066 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602155 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602177 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602214 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602620 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.602676 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs\") pod \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\" (UID: \"47c0a41f-61f3-4e6c-8367-a25c5a75d02b\") "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.604807 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data" (OuterVolumeSpecName: "config-data") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.607788 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.612280 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7" (OuterVolumeSpecName: "kube-api-access-7d4b7") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "kube-api-access-7d4b7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.613657 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "test-operator-logs") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.615549 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.641269 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.642619 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.643206 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.700773 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "47c0a41f-61f3-4e6c-8367-a25c5a75d02b" (UID: "47c0a41f-61f3-4e6c-8367-a25c5a75d02b"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706347 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d4b7\" (UniqueName: \"kubernetes.io/projected/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-kube-api-access-7d4b7\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706391 4903 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706410 4903 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706959 4903 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706982 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.706994 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.707008 4903 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.707019 4903 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.707031 4903 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/47c0a41f-61f3-4e6c-8367-a25c5a75d02b-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.743364 4903 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Nov 26 23:58:10 crc kubenswrapper[4903]: I1126 23:58:10.809754 4903 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Nov 26 23:58:11 crc kubenswrapper[4903]: I1126 23:58:11.082778 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"47c0a41f-61f3-4e6c-8367-a25c5a75d02b","Type":"ContainerDied","Data":"56fd6ff5b657fbd9c84224c202687f125b4a044f13b80a30c22d4eb0781e53dd"}
pod" pod="openstack/tempest-tests-tempest" event={"ID":"47c0a41f-61f3-4e6c-8367-a25c5a75d02b","Type":"ContainerDied","Data":"56fd6ff5b657fbd9c84224c202687f125b4a044f13b80a30c22d4eb0781e53dd"} Nov 26 23:58:11 crc kubenswrapper[4903]: I1126 23:58:11.082854 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56fd6ff5b657fbd9c84224c202687f125b4a044f13b80a30c22d4eb0781e53dd" Nov 26 23:58:11 crc kubenswrapper[4903]: I1126 23:58:11.082958 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.484685 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 23:58:20 crc kubenswrapper[4903]: E1126 23:58:20.487338 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" containerName="tempest-tests-tempest-tests-runner" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.487507 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" containerName="tempest-tests-tempest-tests-runner" Nov 26 23:58:20 crc kubenswrapper[4903]: E1126 23:58:20.487761 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="registry-server" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.487908 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="registry-server" Nov 26 23:58:20 crc kubenswrapper[4903]: E1126 23:58:20.488046 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="extract-utilities" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.488165 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="extract-utilities" Nov 26 23:58:20 crc kubenswrapper[4903]: E1126 23:58:20.488312 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="extract-content" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.488440 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="extract-content" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.489078 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="47c0a41f-61f3-4e6c-8367-a25c5a75d02b" containerName="tempest-tests-tempest-tests-runner" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.489317 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="625bbce3-47da-4cdc-9c42-59135e2ff822" containerName="registry-server" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.490857 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.498481 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-q4vxt" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.513066 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.683285 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cjrr\" (UniqueName: \"kubernetes.io/projected/27638dee-c020-4daa-a79a-5acf5e013899-kube-api-access-5cjrr\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.683593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.790013 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.790363 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cjrr\" (UniqueName: \"kubernetes.io/projected/27638dee-c020-4daa-a79a-5acf5e013899-kube-api-access-5cjrr\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.792100 4903 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.821592 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cjrr\" (UniqueName: \"kubernetes.io/projected/27638dee-c020-4daa-a79a-5acf5e013899-kube-api-access-5cjrr\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:20 crc kubenswrapper[4903]: I1126 23:58:20.855485 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"27638dee-c020-4daa-a79a-5acf5e013899\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:21 crc 
kubenswrapper[4903]: I1126 23:58:21.130752 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 26 23:58:21 crc kubenswrapper[4903]: I1126 23:58:21.676870 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 26 23:58:22 crc kubenswrapper[4903]: I1126 23:58:22.255981 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"27638dee-c020-4daa-a79a-5acf5e013899","Type":"ContainerStarted","Data":"b3108dbbae1ba40c4ac750112f94c9da9caad0086cead6cc8e7aee2c293083c0"} Nov 26 23:58:23 crc kubenswrapper[4903]: I1126 23:58:23.271861 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"27638dee-c020-4daa-a79a-5acf5e013899","Type":"ContainerStarted","Data":"ab977bcc5e892a749256e93f6cc8c9d4e464abe5f5a06259ea11ec34d7456af8"} Nov 26 23:58:23 crc kubenswrapper[4903]: I1126 23:58:23.300966 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.042713929 podStartE2EDuration="3.300943514s" podCreationTimestamp="2025-11-26 23:58:20 +0000 UTC" firstStartedPulling="2025-11-26 23:58:21.69790382 +0000 UTC m=+5830.388138730" lastFinishedPulling="2025-11-26 23:58:22.956133405 +0000 UTC m=+5831.646368315" observedRunningTime="2025-11-26 23:58:23.283777986 +0000 UTC m=+5831.974012896" watchObservedRunningTime="2025-11-26 23:58:23.300943514 +0000 UTC m=+5831.991178434" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.673145 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.676519 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.691429 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.755054 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw6cz\" (UniqueName: \"kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.755637 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.755838 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.857381 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.857451 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.857507 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw6cz\" (UniqueName: \"kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.858108 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.858166 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:44 crc kubenswrapper[4903]: I1126 23:58:44.880368 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mw6cz\" (UniqueName: \"kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz\") pod \"redhat-marketplace-pl2cr\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:45 crc kubenswrapper[4903]: I1126 23:58:45.011876 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:45 crc kubenswrapper[4903]: W1126 23:58:45.489198 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5faa409_8381_4108_9779_b1e4a5ad51ac.slice/crio-5f0a78ccd309849b4bae371a07da804552549c75b93316cd47ba08d60d39e6c0 WatchSource:0}: Error finding container 5f0a78ccd309849b4bae371a07da804552549c75b93316cd47ba08d60d39e6c0: Status 404 returned error can't find the container with id 5f0a78ccd309849b4bae371a07da804552549c75b93316cd47ba08d60d39e6c0 Nov 26 23:58:45 crc kubenswrapper[4903]: I1126 23:58:45.494892 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:58:45 crc kubenswrapper[4903]: I1126 23:58:45.583416 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerStarted","Data":"5f0a78ccd309849b4bae371a07da804552549c75b93316cd47ba08d60d39e6c0"} Nov 26 23:58:46 crc kubenswrapper[4903]: I1126 23:58:46.605496 4903 generic.go:334] "Generic (PLEG): container finished" podID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerID="793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020" exitCode=0 Nov 26 23:58:46 crc kubenswrapper[4903]: I1126 23:58:46.605794 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerDied","Data":"793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020"} Nov 26 23:58:48 crc kubenswrapper[4903]: I1126 23:58:48.635847 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerStarted","Data":"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65"} Nov 26 23:58:49 crc kubenswrapper[4903]: I1126 23:58:49.655011 4903 generic.go:334] "Generic (PLEG): container finished" podID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerID="0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65" exitCode=0 Nov 26 23:58:49 crc kubenswrapper[4903]: I1126 23:58:49.655068 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerDied","Data":"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65"} Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.690057 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerStarted","Data":"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5"} Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.715494 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pl2cr" podStartSLOduration=4.129425412 
podStartE2EDuration="7.715473408s" podCreationTimestamp="2025-11-26 23:58:44 +0000 UTC" firstStartedPulling="2025-11-26 23:58:46.60770859 +0000 UTC m=+5855.297943500" lastFinishedPulling="2025-11-26 23:58:50.193756556 +0000 UTC m=+5858.883991496" observedRunningTime="2025-11-26 23:58:51.711971625 +0000 UTC m=+5860.402206575" watchObservedRunningTime="2025-11-26 23:58:51.715473408 +0000 UTC m=+5860.405708328" Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.949276 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6swq/must-gather-kczv5"] Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.951238 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.953644 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w6swq"/"kube-root-ca.crt" Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.953668 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-w6swq"/"default-dockercfg-npw7t" Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.957711 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w6swq"/"openshift-service-ca.crt" Nov 26 23:58:51 crc kubenswrapper[4903]: I1126 23:58:51.972209 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w6swq/must-gather-kczv5"] Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.055187 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.055331 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6pg4\" (UniqueName: \"kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.156933 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.157076 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6pg4\" (UniqueName: \"kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.158525 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 
23:58:52.177054 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6pg4\" (UniqueName: \"kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4\") pod \"must-gather-kczv5\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") " pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.267399 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/must-gather-kczv5" Nov 26 23:58:52 crc kubenswrapper[4903]: I1126 23:58:52.752068 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w6swq/must-gather-kczv5"] Nov 26 23:58:53 crc kubenswrapper[4903]: I1126 23:58:53.717791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/must-gather-kczv5" event={"ID":"646193d4-60c2-440c-a998-0ec89db5aaf3","Type":"ContainerStarted","Data":"0c23d77fe7bc5f217e4548604adbb706835697a290ab1dc609b5b71a53f30fa9"} Nov 26 23:58:55 crc kubenswrapper[4903]: I1126 23:58:55.012200 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:55 crc kubenswrapper[4903]: I1126 23:58:55.012254 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:58:55 crc kubenswrapper[4903]: I1126 23:58:55.106144 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:59:00 crc kubenswrapper[4903]: I1126 23:59:00.832385 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/must-gather-kczv5" event={"ID":"646193d4-60c2-440c-a998-0ec89db5aaf3","Type":"ContainerStarted","Data":"f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8"} Nov 26 23:59:00 crc kubenswrapper[4903]: I1126 23:59:00.833041 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/must-gather-kczv5" event={"ID":"646193d4-60c2-440c-a998-0ec89db5aaf3","Type":"ContainerStarted","Data":"7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"} Nov 26 23:59:00 crc kubenswrapper[4903]: I1126 23:59:00.861017 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w6swq/must-gather-kczv5" podStartSLOduration=2.883021389 podStartE2EDuration="9.860997697s" podCreationTimestamp="2025-11-26 23:58:51 +0000 UTC" firstStartedPulling="2025-11-26 23:58:52.75528207 +0000 UTC m=+5861.445516980" lastFinishedPulling="2025-11-26 23:58:59.733258368 +0000 UTC m=+5868.423493288" observedRunningTime="2025-11-26 23:59:00.854025801 +0000 UTC m=+5869.544260721" watchObservedRunningTime="2025-11-26 23:59:00.860997697 +0000 UTC m=+5869.551232617" Nov 26 23:59:05 crc kubenswrapper[4903]: I1126 23:59:05.066977 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:59:05 crc kubenswrapper[4903]: I1126 23:59:05.114796 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:59:05 crc kubenswrapper[4903]: I1126 23:59:05.897009 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pl2cr" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="registry-server" 
containerID="cri-o://97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5" gracePeriod=2 Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.103755 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6swq/crc-debug-qm4dv"] Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.105325 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.238982 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.239179 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78kjs\" (UniqueName: \"kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.348326 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.348420 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.362341 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78kjs\" (UniqueName: \"kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.408574 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78kjs\" (UniqueName: \"kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs\") pod \"crc-debug-qm4dv\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.422933 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.457497 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.572009 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities\") pod \"c5faa409-8381-4108-9779-b1e4a5ad51ac\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.572124 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content\") pod \"c5faa409-8381-4108-9779-b1e4a5ad51ac\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.572247 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw6cz\" (UniqueName: \"kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz\") pod \"c5faa409-8381-4108-9779-b1e4a5ad51ac\" (UID: \"c5faa409-8381-4108-9779-b1e4a5ad51ac\") " Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.573631 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities" (OuterVolumeSpecName: "utilities") pod "c5faa409-8381-4108-9779-b1e4a5ad51ac" (UID: "c5faa409-8381-4108-9779-b1e4a5ad51ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.583375 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz" (OuterVolumeSpecName: "kube-api-access-mw6cz") pod "c5faa409-8381-4108-9779-b1e4a5ad51ac" (UID: "c5faa409-8381-4108-9779-b1e4a5ad51ac"). InnerVolumeSpecName "kube-api-access-mw6cz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.599884 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5faa409-8381-4108-9779-b1e4a5ad51ac" (UID: "c5faa409-8381-4108-9779-b1e4a5ad51ac"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.675378 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.675423 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw6cz\" (UniqueName: \"kubernetes.io/projected/c5faa409-8381-4108-9779-b1e4a5ad51ac-kube-api-access-mw6cz\") on node \"crc\" DevicePath \"\"" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.675440 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5faa409-8381-4108-9779-b1e4a5ad51ac-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.950062 4903 generic.go:334] "Generic (PLEG): container finished" podID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerID="97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5" exitCode=0 Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.950412 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerDied","Data":"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5"} Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.950503 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl2cr" event={"ID":"c5faa409-8381-4108-9779-b1e4a5ad51ac","Type":"ContainerDied","Data":"5f0a78ccd309849b4bae371a07da804552549c75b93316cd47ba08d60d39e6c0"} Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.950580 4903 scope.go:117] "RemoveContainer" containerID="97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.951063 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl2cr" Nov 26 23:59:06 crc kubenswrapper[4903]: I1126 23:59:06.963540 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" event={"ID":"e68d7030-24b2-4d52-a178-ff472f3f05d4","Type":"ContainerStarted","Data":"64149ef063bae5624e3670b471d3613c77d06c246be597aaa212f56b3332aaa0"} Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.004800 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.025610 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl2cr"] Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.032850 4903 scope.go:117] "RemoveContainer" containerID="0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.101795 4903 scope.go:117] "RemoveContainer" containerID="793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.138216 4903 scope.go:117] "RemoveContainer" containerID="97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5" Nov 26 23:59:07 crc kubenswrapper[4903]: E1126 23:59:07.139301 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5\": container with ID starting with 97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5 not found: ID does not exist" containerID="97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.139359 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5"} err="failed to get container status \"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5\": rpc error: code = NotFound desc = could not find container \"97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5\": container with ID starting with 97fc548395fee8bd28c0ace4fd13f8e82c2fe25d42d989de7a2024e2c048b0a5 not found: ID does not exist" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.139393 4903 scope.go:117] "RemoveContainer" containerID="0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65" Nov 26 23:59:07 crc kubenswrapper[4903]: E1126 23:59:07.140050 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65\": container with ID starting with 0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65 not found: ID does not exist" containerID="0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.140094 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65"} err="failed to get container status \"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65\": rpc error: code = NotFound desc = could not find container \"0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65\": container with ID starting with 0c86fcea082caa8c7f4315ccc6982db260ffddae2cccfccb2259a2f917bedc65 not found: 
ID does not exist" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.140132 4903 scope.go:117] "RemoveContainer" containerID="793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020" Nov 26 23:59:07 crc kubenswrapper[4903]: E1126 23:59:07.142497 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020\": container with ID starting with 793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020 not found: ID does not exist" containerID="793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020" Nov 26 23:59:07 crc kubenswrapper[4903]: I1126 23:59:07.142523 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020"} err="failed to get container status \"793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020\": rpc error: code = NotFound desc = could not find container \"793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020\": container with ID starting with 793e273fb14490c61827610d9128f92571a2d7d744b182e104dba5a7d9ea6020 not found: ID does not exist" Nov 26 23:59:08 crc kubenswrapper[4903]: I1126 23:59:08.051167 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" path="/var/lib/kubelet/pods/c5faa409-8381-4108-9779-b1e4a5ad51ac/volumes" Nov 26 23:59:18 crc kubenswrapper[4903]: I1126 23:59:18.140952 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" event={"ID":"e68d7030-24b2-4d52-a178-ff472f3f05d4","Type":"ContainerStarted","Data":"92bfe1e9d8f564485dbb92dc392fb152e5cc6ea01f94fe4e8aa674c58da5c098"} Nov 26 23:59:18 crc kubenswrapper[4903]: I1126 23:59:18.155636 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" podStartSLOduration=1.670626327 podStartE2EDuration="12.155613861s" podCreationTimestamp="2025-11-26 23:59:06 +0000 UTC" firstStartedPulling="2025-11-26 23:59:06.501087732 +0000 UTC m=+5875.191322642" lastFinishedPulling="2025-11-26 23:59:16.986075266 +0000 UTC m=+5885.676310176" observedRunningTime="2025-11-26 23:59:18.153531156 +0000 UTC m=+5886.843766096" watchObservedRunningTime="2025-11-26 23:59:18.155613861 +0000 UTC m=+5886.845848791" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.458955 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"] Nov 26 23:59:48 crc kubenswrapper[4903]: E1126 23:59:48.460447 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="registry-server" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.460469 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="registry-server" Nov 26 23:59:48 crc kubenswrapper[4903]: E1126 23:59:48.460538 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="extract-utilities" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.460552 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="extract-utilities" Nov 26 23:59:48 crc kubenswrapper[4903]: E1126 23:59:48.460598 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="extract-content" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.460611 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="extract-content" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.461082 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5faa409-8381-4108-9779-b1e4a5ad51ac" containerName="registry-server" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.465047 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.498057 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"] Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.556328 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.556475 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.556523 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rc58h\" (UniqueName: \"kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.658723 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.658809 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.658834 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rc58h\" (UniqueName: \"kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.659207 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content\") pod \"redhat-operators-tn28r\" (UID: 
\"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.659310 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.687025 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rc58h\" (UniqueName: \"kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h\") pod \"redhat-operators-tn28r\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") " pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:48 crc kubenswrapper[4903]: I1126 23:59:48.797116 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:49 crc kubenswrapper[4903]: I1126 23:59:49.613106 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"] Nov 26 23:59:50 crc kubenswrapper[4903]: I1126 23:59:50.501974 4903 generic.go:334] "Generic (PLEG): container finished" podID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerID="c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed" exitCode=0 Nov 26 23:59:50 crc kubenswrapper[4903]: I1126 23:59:50.502056 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerDied","Data":"c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed"} Nov 26 23:59:50 crc kubenswrapper[4903]: I1126 23:59:50.502612 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerStarted","Data":"1f3de91ffb68554fd8b33eaa6bf952a7025282a819b6ff51bda08acd2a0372a1"} Nov 26 23:59:50 crc kubenswrapper[4903]: I1126 23:59:50.504366 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 23:59:52 crc kubenswrapper[4903]: I1126 23:59:52.533171 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerStarted","Data":"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"} Nov 26 23:59:56 crc kubenswrapper[4903]: I1126 23:59:56.595200 4903 generic.go:334] "Generic (PLEG): container finished" podID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerID="6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e" exitCode=0 Nov 26 23:59:56 crc kubenswrapper[4903]: I1126 23:59:56.595294 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerDied","Data":"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"} Nov 26 23:59:57 crc kubenswrapper[4903]: I1126 23:59:57.608250 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerStarted","Data":"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"} Nov 26 23:59:57 crc kubenswrapper[4903]: I1126 
23:59:57.640390 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tn28r" podStartSLOduration=2.972248999 podStartE2EDuration="9.64036204s" podCreationTimestamp="2025-11-26 23:59:48 +0000 UTC" firstStartedPulling="2025-11-26 23:59:50.504171287 +0000 UTC m=+5919.194406197" lastFinishedPulling="2025-11-26 23:59:57.172284328 +0000 UTC m=+5925.862519238" observedRunningTime="2025-11-26 23:59:57.630782275 +0000 UTC m=+5926.321017185" watchObservedRunningTime="2025-11-26 23:59:57.64036204 +0000 UTC m=+5926.330596950" Nov 26 23:59:58 crc kubenswrapper[4903]: I1126 23:59:58.797687 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:58 crc kubenswrapper[4903]: I1126 23:59:58.798008 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tn28r" Nov 26 23:59:59 crc kubenswrapper[4903]: I1126 23:59:59.842299 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tn28r" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" probeResult="failure" output=< Nov 26 23:59:59 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 26 23:59:59 crc kubenswrapper[4903]: > Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.176773 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29403360-cw64v"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.178987 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.182169 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"pruner-dockercfg-p7bcw" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.182521 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"serviceca" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.191154 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-purge-29403360-wlbzs"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.193331 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.195787 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.230466 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-purge-29403360-tg276"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.233261 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.236353 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.247747 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.249378 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.254961 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.255184 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.262919 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.262986 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp92x\" (UniqueName: \"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263022 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2f94\" (UniqueName: \"kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263084 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263109 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263177 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263221 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llczg\" (UniqueName: \"kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg\") pod \"image-pruner-29403360-cw64v\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263244 4903 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca\") pod \"image-pruner-29403360-cw64v\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263280 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.263305 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.283147 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29403360-cw64v"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.319945 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-purge-29403360-wlbzs"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.338372 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365427 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365491 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365560 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp92x\" (UniqueName: \"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365602 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365630 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2f94\" (UniqueName: 
\"kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365736 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365777 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365814 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365908 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.365945 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx8h5\" (UniqueName: \"kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.366020 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llczg\" (UniqueName: \"kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg\") pod \"image-pruner-29403360-cw64v\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.366060 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca\") pod \"image-pruner-29403360-cw64v\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.366113 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 
00:00:00.368381 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca\") pod \"image-pruner-29403360-cw64v\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.372008 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-purge-29403360-tg276"] Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.372351 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.373076 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.376226 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.378254 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.378809 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.379240 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.382349 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp92x\" (UniqueName: \"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x\") pod \"nova-cell1-db-purge-29403360-tg276\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.392553 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llczg\" (UniqueName: \"kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg\") pod \"image-pruner-29403360-cw64v\" (UID: 
\"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.394233 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2f94\" (UniqueName: \"kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94\") pod \"nova-cell0-db-purge-29403360-wlbzs\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.467904 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.468026 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.468136 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx8h5\" (UniqueName: \"kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.469676 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.474380 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.485965 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx8h5\" (UniqueName: \"kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5\") pod \"collect-profiles-29403360-pkb7k\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.555207 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.590658 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.602327 4903 util.go:30] "No sandbox for pod can be found. 
Nov 27 00:00:00 crc kubenswrapper[4903]: I1127 00:00:00.613129 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k"
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.061269 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29403360-cw64v"]
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.267226 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-purge-29403360-wlbzs"]
Nov 27 00:00:01 crc kubenswrapper[4903]: W1127 00:00:01.342770 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4ddd645_4995_419b_a345_a9ef14f5b01d.slice/crio-1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db WatchSource:0}: Error finding container 1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db: Status 404 returned error can't find the container with id 1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.351628 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-purge-29403360-tg276"]
Nov 27 00:00:01 crc kubenswrapper[4903]: W1127 00:00:01.358329 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8664cdee_ec50_44ab_9573_b929afc36d44.slice/crio-90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5 WatchSource:0}: Error finding container 90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5: Status 404 returned error can't find the container with id 90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.388834 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k"]
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.728681 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29403360-cw64v" event={"ID":"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9","Type":"ContainerStarted","Data":"ce96157cb5306d8f7272d5c2207843d2d58af1f95c748bb884632f553f4703b3"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.729134 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29403360-cw64v" event={"ID":"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9","Type":"ContainerStarted","Data":"66ffcd6d8e1ed85a150b2e5a3aebd116851f4bac43afbff754f43e217a6dbfa5"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.736265 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" event={"ID":"8664cdee-ec50-44ab-9573-b929afc36d44","Type":"ContainerStarted","Data":"90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.750257 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" event={"ID":"79b66965-3ee8-42b9-8526-c73cbd4ee362","Type":"ContainerStarted","Data":"5dbd1025097831cc1ce44146e5d28c2b1cc0aec5723e197af0b281e65a8415db"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.750315 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" event={"ID":"79b66965-3ee8-42b9-8526-c73cbd4ee362","Type":"ContainerStarted","Data":"e1a378a82a34f79c721f5b886b9106c37cbdb208e1ef2c49e10331498b4530f9"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.753550 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29403360-tg276" event={"ID":"a4ddd645-4995-419b-a345-a9ef14f5b01d","Type":"ContainerStarted","Data":"1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db"}
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.816815 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29403360-cw64v" podStartSLOduration=1.816791334 podStartE2EDuration="1.816791334s" podCreationTimestamp="2025-11-27 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:00:01.772164683 +0000 UTC m=+5930.462399613" watchObservedRunningTime="2025-11-27 00:00:01.816791334 +0000 UTC m=+5930.507026244"
Nov 27 00:00:01 crc kubenswrapper[4903]: I1127 00:00:01.851795 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" podStartSLOduration=1.851774698 podStartE2EDuration="1.851774698s" podCreationTimestamp="2025-11-27 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:00:01.796936984 +0000 UTC m=+5930.487171894" watchObservedRunningTime="2025-11-27 00:00:01.851774698 +0000 UTC m=+5930.542009608"
Nov 27 00:00:02 crc kubenswrapper[4903]: I1127 00:00:02.794942 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29403360-tg276" event={"ID":"a4ddd645-4995-419b-a345-a9ef14f5b01d","Type":"ContainerStarted","Data":"197e4513bb1ed82060ed4b03815b25390bcf288ddb0d476e98fc1b2590de0033"}
Nov 27 00:00:02 crc kubenswrapper[4903]: I1127 00:00:02.801156 4903 generic.go:334] "Generic (PLEG): container finished" podID="8664cdee-ec50-44ab-9573-b929afc36d44" containerID="88e8402c6f9c095732588e8b18acbdf5744c76a23a43d9766c2ec8470d54c3e9" exitCode=0
Nov 27 00:00:02 crc kubenswrapper[4903]: I1127 00:00:02.801224 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" event={"ID":"8664cdee-ec50-44ab-9573-b929afc36d44","Type":"ContainerDied","Data":"88e8402c6f9c095732588e8b18acbdf5744c76a23a43d9766c2ec8470d54c3e9"}
Nov 27 00:00:02 crc kubenswrapper[4903]: I1127 00:00:02.813980 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-purge-29403360-tg276" podStartSLOduration=2.813923136 podStartE2EDuration="2.813923136s" podCreationTimestamp="2025-11-27 00:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:00:02.810057532 +0000 UTC m=+5931.500292432" watchObservedRunningTime="2025-11-27 00:00:02.813923136 +0000 UTC m=+5931.504158066"
Nov 27 00:00:03 crc kubenswrapper[4903]: I1127 00:00:03.820293 4903 generic.go:334] "Generic (PLEG): container finished" podID="7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" containerID="ce96157cb5306d8f7272d5c2207843d2d58af1f95c748bb884632f553f4703b3" exitCode=0
Nov 27 00:00:03 crc kubenswrapper[4903]: I1127 00:00:03.820528 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29403360-cw64v" event={"ID":"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9","Type":"ContainerDied","Data":"ce96157cb5306d8f7272d5c2207843d2d58af1f95c748bb884632f553f4703b3"}
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.264087 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k"
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.384271 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume\") pod \"8664cdee-ec50-44ab-9573-b929afc36d44\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") "
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.384943 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx8h5\" (UniqueName: \"kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5\") pod \"8664cdee-ec50-44ab-9573-b929afc36d44\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") "
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.385095 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume\") pod \"8664cdee-ec50-44ab-9573-b929afc36d44\" (UID: \"8664cdee-ec50-44ab-9573-b929afc36d44\") "
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.385742 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume" (OuterVolumeSpecName: "config-volume") pod "8664cdee-ec50-44ab-9573-b929afc36d44" (UID: "8664cdee-ec50-44ab-9573-b929afc36d44"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.395557 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5" (OuterVolumeSpecName: "kube-api-access-lx8h5") pod "8664cdee-ec50-44ab-9573-b929afc36d44" (UID: "8664cdee-ec50-44ab-9573-b929afc36d44"). InnerVolumeSpecName "kube-api-access-lx8h5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.402057 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8664cdee-ec50-44ab-9573-b929afc36d44" (UID: "8664cdee-ec50-44ab-9573-b929afc36d44"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.489299 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8664cdee-ec50-44ab-9573-b929afc36d44-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.489332 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8664cdee-ec50-44ab-9573-b929afc36d44-config-volume\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.489342 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx8h5\" (UniqueName: \"kubernetes.io/projected/8664cdee-ec50-44ab-9573-b929afc36d44-kube-api-access-lx8h5\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.834758 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.836164 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403360-pkb7k" event={"ID":"8664cdee-ec50-44ab-9573-b929afc36d44","Type":"ContainerDied","Data":"90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5"} Nov 27 00:00:04 crc kubenswrapper[4903]: I1127 00:00:04.836286 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90b514a8e000b0f2b6e8dc3aeedfece46cd27ef9f06a40bd1d0a7fe44174dcd5" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.361413 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8"] Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.375820 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403315-fdxf8"] Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.523643 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.619335 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llczg\" (UniqueName: \"kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg\") pod \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.619630 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca\") pod \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\" (UID: \"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9\") " Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.620804 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca" (OuterVolumeSpecName: "serviceca") pod "7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" (UID: "7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.638963 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg" (OuterVolumeSpecName: "kube-api-access-llczg") pod "7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" (UID: "7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9"). InnerVolumeSpecName "kube-api-access-llczg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.722554 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llczg\" (UniqueName: \"kubernetes.io/projected/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-kube-api-access-llczg\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.722596 4903 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9-serviceca\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.845620 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29403360-cw64v" event={"ID":"7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9","Type":"ContainerDied","Data":"66ffcd6d8e1ed85a150b2e5a3aebd116851f4bac43afbff754f43e217a6dbfa5"} Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.845652 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29403360-cw64v" Nov 27 00:00:05 crc kubenswrapper[4903]: I1127 00:00:05.845658 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66ffcd6d8e1ed85a150b2e5a3aebd116851f4bac43afbff754f43e217a6dbfa5" Nov 27 00:00:06 crc kubenswrapper[4903]: I1127 00:00:06.041155 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0abe270e-982f-47b0-9635-b267b2095aa4" path="/var/lib/kubelet/pods/0abe270e-982f-47b0-9635-b267b2095aa4/volumes" Nov 27 00:00:09 crc kubenswrapper[4903]: I1127 00:00:09.852023 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tn28r" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" probeResult="failure" output=< Nov 27 00:00:09 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 27 00:00:09 crc kubenswrapper[4903]: > Nov 27 00:00:09 crc kubenswrapper[4903]: I1127 00:00:09.888635 4903 generic.go:334] "Generic (PLEG): container finished" podID="79b66965-3ee8-42b9-8526-c73cbd4ee362" containerID="5dbd1025097831cc1ce44146e5d28c2b1cc0aec5723e197af0b281e65a8415db" exitCode=0 Nov 27 00:00:09 crc kubenswrapper[4903]: I1127 00:00:09.888685 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" event={"ID":"79b66965-3ee8-42b9-8526-c73cbd4ee362","Type":"ContainerDied","Data":"5dbd1025097831cc1ce44146e5d28c2b1cc0aec5723e197af0b281e65a8415db"} Nov 27 00:00:09 crc kubenswrapper[4903]: I1127 00:00:09.891020 4903 generic.go:334] "Generic (PLEG): container finished" podID="a4ddd645-4995-419b-a345-a9ef14f5b01d" containerID="197e4513bb1ed82060ed4b03815b25390bcf288ddb0d476e98fc1b2590de0033" exitCode=0 Nov 27 00:00:09 crc kubenswrapper[4903]: I1127 00:00:09.891052 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29403360-tg276" 
event={"ID":"a4ddd645-4995-419b-a345-a9ef14f5b01d","Type":"ContainerDied","Data":"197e4513bb1ed82060ed4b03815b25390bcf288ddb0d476e98fc1b2590de0033"} Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.400979 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.405318 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562204 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data\") pod \"a4ddd645-4995-419b-a345-a9ef14f5b01d\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562479 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts\") pod \"79b66965-3ee8-42b9-8526-c73cbd4ee362\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562583 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2f94\" (UniqueName: \"kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94\") pod \"79b66965-3ee8-42b9-8526-c73cbd4ee362\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562653 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle\") pod \"79b66965-3ee8-42b9-8526-c73cbd4ee362\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562901 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts\") pod \"a4ddd645-4995-419b-a345-a9ef14f5b01d\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562937 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp92x\" (UniqueName: \"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x\") pod \"a4ddd645-4995-419b-a345-a9ef14f5b01d\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562965 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data\") pod \"79b66965-3ee8-42b9-8526-c73cbd4ee362\" (UID: \"79b66965-3ee8-42b9-8526-c73cbd4ee362\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.562979 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle\") pod \"a4ddd645-4995-419b-a345-a9ef14f5b01d\" (UID: \"a4ddd645-4995-419b-a345-a9ef14f5b01d\") " Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.570197 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x" (OuterVolumeSpecName: "kube-api-access-rp92x") pod "a4ddd645-4995-419b-a345-a9ef14f5b01d" (UID: "a4ddd645-4995-419b-a345-a9ef14f5b01d"). InnerVolumeSpecName "kube-api-access-rp92x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.572409 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts" (OuterVolumeSpecName: "scripts") pod "79b66965-3ee8-42b9-8526-c73cbd4ee362" (UID: "79b66965-3ee8-42b9-8526-c73cbd4ee362"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.574866 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts" (OuterVolumeSpecName: "scripts") pod "a4ddd645-4995-419b-a345-a9ef14f5b01d" (UID: "a4ddd645-4995-419b-a345-a9ef14f5b01d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.578348 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94" (OuterVolumeSpecName: "kube-api-access-z2f94") pod "79b66965-3ee8-42b9-8526-c73cbd4ee362" (UID: "79b66965-3ee8-42b9-8526-c73cbd4ee362"). InnerVolumeSpecName "kube-api-access-z2f94". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.606194 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79b66965-3ee8-42b9-8526-c73cbd4ee362" (UID: "79b66965-3ee8-42b9-8526-c73cbd4ee362"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.614298 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4ddd645-4995-419b-a345-a9ef14f5b01d" (UID: "a4ddd645-4995-419b-a345-a9ef14f5b01d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.614432 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data" (OuterVolumeSpecName: "config-data") pod "79b66965-3ee8-42b9-8526-c73cbd4ee362" (UID: "79b66965-3ee8-42b9-8526-c73cbd4ee362"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.616866 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data" (OuterVolumeSpecName: "config-data") pod "a4ddd645-4995-419b-a345-a9ef14f5b01d" (UID: "a4ddd645-4995-419b-a345-a9ef14f5b01d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668037 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-scripts\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668074 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp92x\" (UniqueName: \"kubernetes.io/projected/a4ddd645-4995-419b-a345-a9ef14f5b01d-kube-api-access-rp92x\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668090 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668103 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668115 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4ddd645-4995-419b-a345-a9ef14f5b01d-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668126 4903 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-scripts\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668138 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2f94\" (UniqueName: \"kubernetes.io/projected/79b66965-3ee8-42b9-8526-c73cbd4ee362-kube-api-access-z2f94\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.668151 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b66965-3ee8-42b9-8526-c73cbd4ee362-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.919566 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-purge-29403360-tg276" event={"ID":"a4ddd645-4995-419b-a345-a9ef14f5b01d","Type":"ContainerDied","Data":"1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db"} Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.919876 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-purge-29403360-tg276" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.919917 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1361f4aa37d542d88c2492bc7592d01a5efdfadb27f72ac12c0890ce633aa0db" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.921231 4903 generic.go:334] "Generic (PLEG): container finished" podID="e68d7030-24b2-4d52-a178-ff472f3f05d4" containerID="92bfe1e9d8f564485dbb92dc392fb152e5cc6ea01f94fe4e8aa674c58da5c098" exitCode=0 Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.921309 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" event={"ID":"e68d7030-24b2-4d52-a178-ff472f3f05d4","Type":"ContainerDied","Data":"92bfe1e9d8f564485dbb92dc392fb152e5cc6ea01f94fe4e8aa674c58da5c098"} Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.926423 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" event={"ID":"79b66965-3ee8-42b9-8526-c73cbd4ee362","Type":"ContainerDied","Data":"e1a378a82a34f79c721f5b886b9106c37cbdb208e1ef2c49e10331498b4530f9"} Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.926475 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1a378a82a34f79c721f5b886b9106c37cbdb208e1ef2c49e10331498b4530f9" Nov 27 00:00:11 crc kubenswrapper[4903]: I1127 00:00:11.926538 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-purge-29403360-wlbzs" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.050275 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.087674 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-qm4dv"] Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.098208 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-qm4dv"] Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.204321 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host\") pod \"e68d7030-24b2-4d52-a178-ff472f3f05d4\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.204391 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78kjs\" (UniqueName: \"kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs\") pod \"e68d7030-24b2-4d52-a178-ff472f3f05d4\" (UID: \"e68d7030-24b2-4d52-a178-ff472f3f05d4\") " Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.204402 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host" (OuterVolumeSpecName: "host") pod "e68d7030-24b2-4d52-a178-ff472f3f05d4" (UID: "e68d7030-24b2-4d52-a178-ff472f3f05d4"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.205227 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e68d7030-24b2-4d52-a178-ff472f3f05d4-host\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.215988 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs" (OuterVolumeSpecName: "kube-api-access-78kjs") pod "e68d7030-24b2-4d52-a178-ff472f3f05d4" (UID: "e68d7030-24b2-4d52-a178-ff472f3f05d4"). InnerVolumeSpecName "kube-api-access-78kjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.307890 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78kjs\" (UniqueName: \"kubernetes.io/projected/e68d7030-24b2-4d52-a178-ff472f3f05d4-kube-api-access-78kjs\") on node \"crc\" DevicePath \"\"" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.947588 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64149ef063bae5624e3670b471d3613c77d06c246be597aaa212f56b3332aaa0" Nov 27 00:00:13 crc kubenswrapper[4903]: I1127 00:00:13.947915 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-qm4dv" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.041169 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e68d7030-24b2-4d52-a178-ff472f3f05d4" path="/var/lib/kubelet/pods/e68d7030-24b2-4d52-a178-ff472f3f05d4/volumes" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.303896 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6swq/crc-debug-5xsmp"] Nov 27 00:00:14 crc kubenswrapper[4903]: E1127 00:00:14.304342 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b66965-3ee8-42b9-8526-c73cbd4ee362" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304354 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b66965-3ee8-42b9-8526-c73cbd4ee362" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: E1127 00:00:14.304390 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8664cdee-ec50-44ab-9573-b929afc36d44" containerName="collect-profiles" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304396 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8664cdee-ec50-44ab-9573-b929afc36d44" containerName="collect-profiles" Nov 27 00:00:14 crc kubenswrapper[4903]: E1127 00:00:14.304421 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" containerName="image-pruner" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304428 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" containerName="image-pruner" Nov 27 00:00:14 crc kubenswrapper[4903]: E1127 00:00:14.304438 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68d7030-24b2-4d52-a178-ff472f3f05d4" containerName="container-00" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304444 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68d7030-24b2-4d52-a178-ff472f3f05d4" containerName="container-00" Nov 27 00:00:14 crc kubenswrapper[4903]: E1127 00:00:14.304465 4903 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="a4ddd645-4995-419b-a345-a9ef14f5b01d" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304471 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4ddd645-4995-419b-a345-a9ef14f5b01d" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304692 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4ddd645-4995-419b-a345-a9ef14f5b01d" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304726 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8664cdee-ec50-44ab-9573-b929afc36d44" containerName="collect-profiles" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304743 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68d7030-24b2-4d52-a178-ff472f3f05d4" containerName="container-00" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304758 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b66965-3ee8-42b9-8526-c73cbd4ee362" containerName="nova-manage" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.304773 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c934ce4-ff26-4cdd-b8fe-d6b076ace7f9" containerName="image-pruner" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.305494 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.431256 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4rbg\" (UniqueName: \"kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.431358 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.533111 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.533291 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.533320 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4rbg\" (UniqueName: \"kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.552326 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4rbg\" (UniqueName: 
\"kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg\") pod \"crc-debug-5xsmp\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") " pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.623573 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" Nov 27 00:00:14 crc kubenswrapper[4903]: W1127 00:00:14.664455 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5e38698_b988_4851_ba39_c689e0f4eb1f.slice/crio-b465a40de910c9121243a5efee8802ac0ea4bc8949e7560b2056d82bc89c92a3 WatchSource:0}: Error finding container b465a40de910c9121243a5efee8802ac0ea4bc8949e7560b2056d82bc89c92a3: Status 404 returned error can't find the container with id b465a40de910c9121243a5efee8802ac0ea4bc8949e7560b2056d82bc89c92a3 Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.977038 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" event={"ID":"f5e38698-b988-4851-ba39-c689e0f4eb1f","Type":"ContainerStarted","Data":"981153dadb312b4461573a685c81a450f81374341369fc7b1e658851975e570e"} Nov 27 00:00:14 crc kubenswrapper[4903]: I1127 00:00:14.977305 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" event={"ID":"f5e38698-b988-4851-ba39-c689e0f4eb1f","Type":"ContainerStarted","Data":"b465a40de910c9121243a5efee8802ac0ea4bc8949e7560b2056d82bc89c92a3"} Nov 27 00:00:15 crc kubenswrapper[4903]: I1127 00:00:15.007429 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" podStartSLOduration=1.007409719 podStartE2EDuration="1.007409719s" podCreationTimestamp="2025-11-27 00:00:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:00:14.998512961 +0000 UTC m=+5943.688747871" watchObservedRunningTime="2025-11-27 00:00:15.007409719 +0000 UTC m=+5943.697644629" Nov 27 00:00:15 crc kubenswrapper[4903]: I1127 00:00:15.986886 4903 generic.go:334] "Generic (PLEG): container finished" podID="f5e38698-b988-4851-ba39-c689e0f4eb1f" containerID="981153dadb312b4461573a685c81a450f81374341369fc7b1e658851975e570e" exitCode=0 Nov 27 00:00:15 crc kubenswrapper[4903]: I1127 00:00:15.987145 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-5xsmp" event={"ID":"f5e38698-b988-4851-ba39-c689e0f4eb1f","Type":"ContainerDied","Data":"981153dadb312b4461573a685c81a450f81374341369fc7b1e658851975e570e"} Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.120265 4903 util.go:48] "No ready sandbox for pod can be found. 
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.286068 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4rbg\" (UniqueName: \"kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg\") pod \"f5e38698-b988-4851-ba39-c689e0f4eb1f\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") "
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.286139 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host\") pod \"f5e38698-b988-4851-ba39-c689e0f4eb1f\" (UID: \"f5e38698-b988-4851-ba39-c689e0f4eb1f\") "
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.286254 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host" (OuterVolumeSpecName: "host") pod "f5e38698-b988-4851-ba39-c689e0f4eb1f" (UID: "f5e38698-b988-4851-ba39-c689e0f4eb1f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.286863 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5e38698-b988-4851-ba39-c689e0f4eb1f-host\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.487050 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-5xsmp"]
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.497201 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-5xsmp"]
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.881495 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg" (OuterVolumeSpecName: "kube-api-access-x4rbg") pod "f5e38698-b988-4851-ba39-c689e0f4eb1f" (UID: "f5e38698-b988-4851-ba39-c689e0f4eb1f"). InnerVolumeSpecName "kube-api-access-x4rbg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:00:17 crc kubenswrapper[4903]: I1127 00:00:17.902783 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4rbg\" (UniqueName: \"kubernetes.io/projected/f5e38698-b988-4851-ba39-c689e0f4eb1f-kube-api-access-x4rbg\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:18 crc kubenswrapper[4903]: I1127 00:00:18.017897 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b465a40de910c9121243a5efee8802ac0ea4bc8949e7560b2056d82bc89c92a3"
Nov 27 00:00:18 crc kubenswrapper[4903]: I1127 00:00:18.017957 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-5xsmp"
Nov 27 00:00:18 crc kubenswrapper[4903]: I1127 00:00:18.048274 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5e38698-b988-4851-ba39-c689e0f4eb1f" path="/var/lib/kubelet/pods/f5e38698-b988-4851-ba39-c689e0f4eb1f/volumes"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.047022 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w6swq/crc-debug-wn7n9"]
Nov 27 00:00:19 crc kubenswrapper[4903]: E1127 00:00:19.047965 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5e38698-b988-4851-ba39-c689e0f4eb1f" containerName="container-00"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.047983 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5e38698-b988-4851-ba39-c689e0f4eb1f" containerName="container-00"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.048359 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5e38698-b988-4851-ba39-c689e0f4eb1f" containerName="container-00"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.049362 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.129573 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.130098 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbpx5\" (UniqueName: \"kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.232755 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.232907 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.233107 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbpx5\" (UniqueName: \"kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.254595 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbpx5\" (UniqueName: \"kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5\") pod \"crc-debug-wn7n9\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") " pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.371789 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:19 crc kubenswrapper[4903]: W1127 00:00:19.409391 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c14e363_c5e3_4f5c_b2fd_37589bbfa720.slice/crio-28105cbbf88ac476bb22f383f5212e2d7f53b5f54fb7692098d48203ead11481 WatchSource:0}: Error finding container 28105cbbf88ac476bb22f383f5212e2d7f53b5f54fb7692098d48203ead11481: Status 404 returned error can't find the container with id 28105cbbf88ac476bb22f383f5212e2d7f53b5f54fb7692098d48203ead11481
Nov 27 00:00:19 crc kubenswrapper[4903]: I1127 00:00:19.852008 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tn28r" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:00:19 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:00:19 crc kubenswrapper[4903]: >
Nov 27 00:00:20 crc kubenswrapper[4903]: I1127 00:00:20.053911 4903 generic.go:334] "Generic (PLEG): container finished" podID="2c14e363-c5e3-4f5c-b2fd-37589bbfa720" containerID="f130f5aa86dd568ecb77d92f7b8081ffd2b79e98795d6c9a4a39c9cecff280c9" exitCode=0
Nov 27 00:00:20 crc kubenswrapper[4903]: I1127 00:00:20.054134 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-wn7n9" event={"ID":"2c14e363-c5e3-4f5c-b2fd-37589bbfa720","Type":"ContainerDied","Data":"f130f5aa86dd568ecb77d92f7b8081ffd2b79e98795d6c9a4a39c9cecff280c9"}
Nov 27 00:00:20 crc kubenswrapper[4903]: I1127 00:00:20.054196 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/crc-debug-wn7n9" event={"ID":"2c14e363-c5e3-4f5c-b2fd-37589bbfa720","Type":"ContainerStarted","Data":"28105cbbf88ac476bb22f383f5212e2d7f53b5f54fb7692098d48203ead11481"}
Nov 27 00:00:20 crc kubenswrapper[4903]: I1127 00:00:20.113993 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-wn7n9"]
Nov 27 00:00:20 crc kubenswrapper[4903]: I1127 00:00:20.130517 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6swq/crc-debug-wn7n9"]
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.192954 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.284775 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host\") pod \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") "
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.284899 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbpx5\" (UniqueName: \"kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5\") pod \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\" (UID: \"2c14e363-c5e3-4f5c-b2fd-37589bbfa720\") "
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.284914 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host" (OuterVolumeSpecName: "host") pod "2c14e363-c5e3-4f5c-b2fd-37589bbfa720" (UID: "2c14e363-c5e3-4f5c-b2fd-37589bbfa720"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.285662 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-host\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.295850 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5" (OuterVolumeSpecName: "kube-api-access-gbpx5") pod "2c14e363-c5e3-4f5c-b2fd-37589bbfa720" (UID: "2c14e363-c5e3-4f5c-b2fd-37589bbfa720"). InnerVolumeSpecName "kube-api-access-gbpx5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:00:21 crc kubenswrapper[4903]: I1127 00:00:21.388267 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbpx5\" (UniqueName: \"kubernetes.io/projected/2c14e363-c5e3-4f5c-b2fd-37589bbfa720-kube-api-access-gbpx5\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:22 crc kubenswrapper[4903]: I1127 00:00:22.050632 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c14e363-c5e3-4f5c-b2fd-37589bbfa720" path="/var/lib/kubelet/pods/2c14e363-c5e3-4f5c-b2fd-37589bbfa720/volumes"
Nov 27 00:00:22 crc kubenswrapper[4903]: I1127 00:00:22.078890 4903 scope.go:117] "RemoveContainer" containerID="f130f5aa86dd568ecb77d92f7b8081ffd2b79e98795d6c9a4a39c9cecff280c9"
Nov 27 00:00:22 crc kubenswrapper[4903]: I1127 00:00:22.079089 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/crc-debug-wn7n9"
Nov 27 00:00:28 crc kubenswrapper[4903]: I1127 00:00:28.316681 4903 scope.go:117] "RemoveContainer" containerID="86714bef74405f7fc7cb3807df2ff2a67d87861904205d3f136d82d52d46e74f"
Nov 27 00:00:28 crc kubenswrapper[4903]: I1127 00:00:28.852541 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tn28r"
Nov 27 00:00:28 crc kubenswrapper[4903]: I1127 00:00:28.902879 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tn28r"
Nov 27 00:00:29 crc kubenswrapper[4903]: I1127 00:00:29.093333 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"]
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.171796 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tn28r" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" containerID="cri-o://a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa" gracePeriod=2
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.651639 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tn28r"
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.829161 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content\") pod \"c12d5a48-ec5d-4f46-a69f-191fe9029980\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") "
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.829358 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities\") pod \"c12d5a48-ec5d-4f46-a69f-191fe9029980\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") "
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.829389 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rc58h\" (UniqueName: \"kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h\") pod \"c12d5a48-ec5d-4f46-a69f-191fe9029980\" (UID: \"c12d5a48-ec5d-4f46-a69f-191fe9029980\") "
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.830088 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities" (OuterVolumeSpecName: "utilities") pod "c12d5a48-ec5d-4f46-a69f-191fe9029980" (UID: "c12d5a48-ec5d-4f46-a69f-191fe9029980"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.838024 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h" (OuterVolumeSpecName: "kube-api-access-rc58h") pod "c12d5a48-ec5d-4f46-a69f-191fe9029980" (UID: "c12d5a48-ec5d-4f46-a69f-191fe9029980"). InnerVolumeSpecName "kube-api-access-rc58h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.917519 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c12d5a48-ec5d-4f46-a69f-191fe9029980" (UID: "c12d5a48-ec5d-4f46-a69f-191fe9029980"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.932082 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-utilities\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.932275 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rc58h\" (UniqueName: \"kubernetes.io/projected/c12d5a48-ec5d-4f46-a69f-191fe9029980-kube-api-access-rc58h\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:30 crc kubenswrapper[4903]: I1127 00:00:30.932336 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c12d5a48-ec5d-4f46-a69f-191fe9029980-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.183984 4903 generic.go:334] "Generic (PLEG): container finished" podID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerID="a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa" exitCode=0
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.184021 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerDied","Data":"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"}
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.184049 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tn28r" event={"ID":"c12d5a48-ec5d-4f46-a69f-191fe9029980","Type":"ContainerDied","Data":"1f3de91ffb68554fd8b33eaa6bf952a7025282a819b6ff51bda08acd2a0372a1"}
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.184065 4903 scope.go:117] "RemoveContainer" containerID="a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.184119 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tn28r"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.208096 4903 scope.go:117] "RemoveContainer" containerID="6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.223111 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"]
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.231200 4903 scope.go:117] "RemoveContainer" containerID="c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.232450 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tn28r"]
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.276254 4903 scope.go:117] "RemoveContainer" containerID="a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"
Nov 27 00:00:31 crc kubenswrapper[4903]: E1127 00:00:31.276780 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa\": container with ID starting with a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa not found: ID does not exist" containerID="a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.276841 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa"} err="failed to get container status \"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa\": rpc error: code = NotFound desc = could not find container \"a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa\": container with ID starting with a7a6b6cd379553041055553941c720257ea4e5efbb53f02f537aacf1dedf55aa not found: ID does not exist"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.276876 4903 scope.go:117] "RemoveContainer" containerID="6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"
Nov 27 00:00:31 crc kubenswrapper[4903]: E1127 00:00:31.277384 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e\": container with ID starting with 6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e not found: ID does not exist" containerID="6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.277424 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e"} err="failed to get container status \"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e\": rpc error: code = NotFound desc = could not find container \"6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e\": container with ID starting with 6f42836a284528359e0c0d0924b5c0fa3cc99dbf4c6f49a2074b3d486a10a21e not found: ID does not exist"
Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.277456 4903 scope.go:117] "RemoveContainer" containerID="c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed"
Nov 27 00:00:31 crc kubenswrapper[4903]: E1127 00:00:31.277813 4903 log.go:32] "ContainerStatus from runtime service failed"
err="rpc error: code = NotFound desc = could not find container \"c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed\": container with ID starting with c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed not found: ID does not exist" containerID="c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed" Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.277858 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed"} err="failed to get container status \"c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed\": rpc error: code = NotFound desc = could not find container \"c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed\": container with ID starting with c6e5a13ed6180fff40aa2c3a7129cfd94dd08b3855782c3eadd3735e7e2e40ed not found: ID does not exist" Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.980770 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:00:31 crc kubenswrapper[4903]: I1127 00:00:31.980826 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:00:32 crc kubenswrapper[4903]: I1127 00:00:32.040400 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" path="/var/lib/kubelet/pods/c12d5a48-ec5d-4f46-a69f-191fe9029980/volumes" Nov 27 00:00:46 crc kubenswrapper[4903]: I1127 00:00:46.940064 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-api/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.147799 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-evaluator/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.152820 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-listener/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.174583 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-notifier/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.330431 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86bcb477db-8xtr8_37c314f3-5577-423f-887f-7c551f339c3b/barbican-api-log/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.333334 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86bcb477db-8xtr8_37c314f3-5577-423f-887f-7c551f339c3b/barbican-api/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.366084 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79464fcfdb-twqnx_8824059c-5e2d-4ce5-b224-fc144593d08d/barbican-keystone-listener/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.579277 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-worker-58b8c46b65-kdwlj_40944cbe-7c1b-43b0-bed6-28f9490a0d5f/barbican-worker/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.646001 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-58b8c46b65-kdwlj_40944cbe-7c1b-43b0-bed6-28f9490a0d5f/barbican-worker-log/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.656019 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79464fcfdb-twqnx_8824059c-5e2d-4ce5-b224-fc144593d08d/barbican-keystone-listener-log/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.788100 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2_e9c5ea47-6ef3-44d4-b710-d11a2367448e/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:47 crc kubenswrapper[4903]: I1127 00:00:47.906004 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/ceilometer-central-agent/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.041939 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/proxy-httpd/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.049838 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/ceilometer-notification-agent/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.096913 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/sg-core/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.222239 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_172525bd-6c7f-4e76-b7b4-47c937c33a14/cinder-api-log/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.340362 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_172525bd-6c7f-4e76-b7b4-47c937c33a14/cinder-api/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.396824 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_40127bc0-c09b-4c3f-af93-cdfcaee9d36e/cinder-scheduler/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.441622 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_40127bc0-c09b-4c3f-af93-cdfcaee9d36e/probe/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.606638 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-65pnw_59398ac1-b8ae-47b3-b00e-f9f245b4eb27/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.653617 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-dg882_e186f675-8a6e-4e8d-8531-247e10617355/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.784269 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/init/0.log" Nov 27 00:00:48 crc kubenswrapper[4903]: I1127 00:00:48.993377 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/init/0.log" Nov 27 00:00:49 crc kubenswrapper[4903]: I1127 00:00:49.039725 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/dnsmasq-dns/0.log" Nov 27 00:00:49 crc kubenswrapper[4903]: I1127 00:00:49.072895 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-k665c_8115e93a-72c0-4022-a687-6b58fb3c45ab/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:49 crc kubenswrapper[4903]: I1127 00:00:49.257046 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e9527a05-6356-4ee8-8e07-5557453ad8c2/glance-httpd/0.log" Nov 27 00:00:49 crc kubenswrapper[4903]: I1127 00:00:49.280186 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e9527a05-6356-4ee8-8e07-5557453ad8c2/glance-log/0.log" Nov 27 00:00:50 crc kubenswrapper[4903]: I1127 00:00:50.205032 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d500fa23-7825-4dde-95b4-dce1b93b24cb/glance-httpd/0.log" Nov 27 00:00:50 crc kubenswrapper[4903]: I1127 00:00:50.274228 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d500fa23-7825-4dde-95b4-dce1b93b24cb/glance-log/0.log" Nov 27 00:00:50 crc kubenswrapper[4903]: I1127 00:00:50.891843 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-66bc977bcf-w4wg7_cc6fa80e-1db0-4944-9c07-04df732f4914/heat-engine/0.log" Nov 27 00:00:51 crc kubenswrapper[4903]: I1127 00:00:51.039931 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6_ab43bf25-eae9-472d-80d9-0e91478c8302/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:51 crc kubenswrapper[4903]: I1127 00:00:51.163791 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-wh5kt_31e264f2-c649-43e0-af90-ca65e2cb84da/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:51 crc kubenswrapper[4903]: I1127 00:00:51.176009 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-867c798764-xfxnw_e0f6d1e3-0e99-495c-a8da-005cc8d05e25/heat-api/0.log" Nov 27 00:00:51 crc kubenswrapper[4903]: I1127 00:00:51.272337 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-766bc64666-vhfgd_3abe2357-63af-453d-9e93-3d087275e569/heat-cfnapi/0.log" Nov 27 00:00:51 crc kubenswrapper[4903]: I1127 00:00:51.299394 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29403301-ljvz7_be70a483-c763-4980-a995-61d1a6f5573e/keystone-cron/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.084827 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0f92d7db-9155-4bdc-8285-29091382434c/kube-state-metrics/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.265225 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8jt67_402f509f-516b-446b-a5e8-f42c6aa65ed7/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.344089 4903 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_keystone-fcdf5f968-7ppxk_b3f65d25-6e7d-4b8e-99e1-c75c39abb982/keystone-api/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.419200 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-bg8bq_c177b1fa-8c5c-43f2-bb1d-c1695ccf0050/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.543328 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_a0677cff-9cf4-4eba-bb4b-4fea82d38f71/mysqld-exporter/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.911610 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb_c8b7f7a3-07d3-46c8-a5e2-0b08c743d466/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.916034 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5bbd968879-hmnnt_710e7305-de14-46ea-8cc9-1cbc9dcf0a44/neutron-api/0.log" Nov 27 00:00:52 crc kubenswrapper[4903]: I1127 00:00:52.922780 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5bbd968879-hmnnt_710e7305-de14-46ea-8cc9-1cbc9dcf0a44/neutron-httpd/0.log" Nov 27 00:00:53 crc kubenswrapper[4903]: I1127 00:00:53.444279 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_47dfaf47-3f8b-4355-8c56-a0955f49d95f/nova-cell0-conductor-conductor/0.log" Nov 27 00:00:53 crc kubenswrapper[4903]: I1127 00:00:53.590236 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-db-purge-29403360-wlbzs_79b66965-3ee8-42b9-8526-c73cbd4ee362/nova-manage/0.log" Nov 27 00:00:53 crc kubenswrapper[4903]: I1127 00:00:53.762999 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d23a575d-55d9-4805-bfee-09f92b0b97ef/nova-api-log/0.log" Nov 27 00:00:53 crc kubenswrapper[4903]: I1127 00:00:53.921656 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7d4be6e3-d909-4e4f-b5a0-3c949c02421a/nova-cell1-conductor-conductor/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.141627 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-db-purge-29403360-tg276_a4ddd645-4995-419b-a345-a9ef14f5b01d/nova-manage/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.199404 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d23a575d-55d9-4805-bfee-09f92b0b97ef/nova-api-api/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.388336 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_81026daf-ddcc-4599-8458-b8280d48c920/nova-cell1-novncproxy-novncproxy/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.453752 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r5l7j_be764009-e30d-4394-b38c-83996b86b9e1/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.647617 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ecafb017-7ef9-492e-95d5-d297ec3c9725/nova-metadata-log/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.963746 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-scheduler-0_646d5a7a-f188-4dc2-99ac-24c16bcf59fc/nova-scheduler-scheduler/0.log" Nov 27 00:00:54 crc kubenswrapper[4903]: I1127 00:00:54.975110 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/mysql-bootstrap/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.241258 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/mysql-bootstrap/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.260590 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/galera/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.488498 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/mysql-bootstrap/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.668446 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/mysql-bootstrap/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.671211 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/galera/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.822903 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0/openstackclient/0.log" Nov 27 00:00:55 crc kubenswrapper[4903]: I1127 00:00:55.979391 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-kzb8j_1aa29ea2-aaab-435e-9995-41a5f137be03/ovn-controller/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.089602 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-ksn6g_d95087b9-4f77-4f65-b7bd-b799e673de6f/openstack-network-exporter/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.262757 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server-init/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.468257 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server-init/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.472280 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovs-vswitchd/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.528593 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.719051 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wcwtk_d791fd3c-48a9-44e4-85e2-9e0f088ecb6c/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.724155 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ecafb017-7ef9-492e-95d5-d297ec3c9725/nova-metadata-metadata/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.886839 4903 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovn-northd-0_16d5105b-5e4e-4806-a873-a79e1aaccc68/openstack-network-exporter/0.log" Nov 27 00:00:56 crc kubenswrapper[4903]: I1127 00:00:56.931792 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_16d5105b-5e4e-4806-a873-a79e1aaccc68/ovn-northd/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.026068 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e/openstack-network-exporter/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.123913 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e/ovsdbserver-nb/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.209108 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4286478b-1146-4f96-8819-753c3f6a6158/openstack-network-exporter/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.274892 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4286478b-1146-4f96-8819-753c3f6a6158/ovsdbserver-sb/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.761018 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-747d9754b8-8kqq9_74c08acb-478e-442a-b66d-5f29e75790f4/placement-api/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.772048 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/init-config-reloader/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.823516 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-747d9754b8-8kqq9_74c08acb-478e-442a-b66d-5f29e75790f4/placement-log/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.961282 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/config-reloader/0.log" Nov 27 00:00:57 crc kubenswrapper[4903]: I1127 00:00:57.964222 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/init-config-reloader/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.044959 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/prometheus/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.051910 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/thanos-sidecar/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.230277 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/setup-container/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.398912 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/setup-container/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.417547 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/rabbitmq/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.461370 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/setup-container/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.681581 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/setup-container/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.729184 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/rabbitmq/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.771250 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6_f442214b-4e84-4d9a-aa2c-9c3ae673ed4d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:58 crc kubenswrapper[4903]: I1127 00:00:58.902795 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-s8hcp_f4801ee8-4d4e-4459-8289-60e5db96a3b9/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:59 crc kubenswrapper[4903]: I1127 00:00:59.673307 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2_154ea937-525e-406f-bef0-ffd2c360d7e1/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:00:59 crc kubenswrapper[4903]: I1127 00:00:59.753001 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-sgr64_f8164f4b-1f48-4f38-810b-3a3b636c48ed/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.009016 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-wrjtk_078bfb36-1f57-4173-b01a-cc7a6e3862dc/ssh-known-hosts-edpm-deployment/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.161392 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-purge-29403361-ffwm8"] Nov 27 00:01:00 crc kubenswrapper[4903]: E1127 00:01:00.161899 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="extract-content" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.161916 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="extract-content" Nov 27 00:01:00 crc kubenswrapper[4903]: E1127 00:01:00.161943 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c14e363-c5e3-4f5c-b2fd-37589bbfa720" containerName="container-00" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.161950 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c14e363-c5e3-4f5c-b2fd-37589bbfa720" containerName="container-00" Nov 27 00:01:00 crc kubenswrapper[4903]: E1127 00:01:00.161988 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="extract-utilities" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.161995 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="extract-utilities" Nov 27 00:01:00 crc kubenswrapper[4903]: E1127 00:01:00.162014 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.162020 4903 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.162244 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c14e363-c5e3-4f5c-b2fd-37589bbfa720" containerName="container-00" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.162262 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="c12d5a48-ec5d-4f46-a69f-191fe9029980" containerName="registry-server" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.163078 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.179539 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-purge-29403361-vv6q4"] Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.181060 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.183259 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.211555 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29403361-mfzw7"] Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.215740 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.233266 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403361-mfzw7"] Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250367 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250515 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250562 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250655 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250712 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250751 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250870 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d5vs\" (UniqueName: \"kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.250890 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f95xs\" (UniqueName: \"kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.264330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-purge-29403361-vv6q4"] Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.279797 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-purge-29403361-ffwm8"] Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.297344 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76df48858c-p4q7x_73028630-97ff-425e-9ac8-1b30f1c834c4/proxy-server/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.328125 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-wp2ng_be81f20b-b9ca-44bf-8aad-2cd7a10e44cc/swift-ring-rebalance/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352732 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352845 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwkmb\" (UniqueName: \"kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352884 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352918 4903 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352946 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.352978 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353002 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d5vs\" (UniqueName: \"kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353026 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f95xs\" (UniqueName: \"kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353072 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353171 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353192 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.353216 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " 
pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.358015 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76df48858c-p4q7x_73028630-97ff-425e-9ac8-1b30f1c834c4/proxy-httpd/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.360232 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.361145 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.363095 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.365440 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.373065 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.373196 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d5vs\" (UniqueName: \"kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.374411 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle\") pod \"glance-db-purge-29403361-vv6q4\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.375788 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f95xs\" (UniqueName: \"kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs\") pod \"cinder-db-purge-29403361-ffwm8\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.455916 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.455965 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.456077 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwkmb\" (UniqueName: \"kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.456184 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.461019 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.462159 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.462844 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.476647 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwkmb\" (UniqueName: \"kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb\") pod \"keystone-cron-29403361-mfzw7\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.487341 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.508676 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-auditor/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.531648 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.543741 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.560308 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-reaper/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.760514 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-replicator/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.835358 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-auditor/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.882205 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-server/0.log" Nov 27 00:01:00 crc kubenswrapper[4903]: I1127 00:01:00.888151 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-replicator/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.105789 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-purge-29403361-vv6q4"] Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.134905 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-purge-29403361-ffwm8"] Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.242176 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403361-mfzw7"] Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.544486 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-auditor/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.570928 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-updater/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.573846 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-expirer/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.606268 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-server/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.636495 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403361-mfzw7" event={"ID":"ce8e3d84-5904-40d3-99fd-0847d2f205f1","Type":"ContainerStarted","Data":"8dc1db90ac17511240d7354fc1d3157bd8c9e2b5ae16324f215d33a541eae396"} Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.638707 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29403361-ffwm8" event={"ID":"bed02ce7-86ba-4f32-aa9d-5517cca15371","Type":"ContainerStarted","Data":"28496b6e1fdd3ebde3066dbb719c36301ef305a8429c4171b4dae6a1212571c9"} Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.640394 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29403361-vv6q4" 
event={"ID":"d8ce0598-1d46-4ddb-b383-4b66b3296d4b","Type":"ContainerStarted","Data":"40997955b76304a45379460d8cd7125ecaac74e9e72e43fa4adaa742425fa5df"} Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.809518 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/rsync/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.854647 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-server/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.922846 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-updater/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.933979 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-replicator/0.log" Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.981132 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:01:01 crc kubenswrapper[4903]: I1127 00:01:01.981185 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.146883 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/swift-recon-cron/0.log" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.167658 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk_78703fde-a3cc-4241-940e-f92a638f8549/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.474298 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5_5582dbe8-0a07-4c5f-9054-5e0bc32c2819/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.677645 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403361-mfzw7" event={"ID":"ce8e3d84-5904-40d3-99fd-0847d2f205f1","Type":"ContainerStarted","Data":"7358df8659b7dd8a4b597c424ca45a60133f0de0a454c734b2670908fcb5f50f"} Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.716164 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29403361-mfzw7" podStartSLOduration=2.716146003 podStartE2EDuration="2.716146003s" podCreationTimestamp="2025-11-27 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:01:02.696326093 +0000 UTC m=+5991.386561003" watchObservedRunningTime="2025-11-27 00:01:02.716146003 +0000 UTC m=+5991.406380913" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.793814 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_27638dee-c020-4daa-a79a-5acf5e013899/test-operator-logs-container/0.log" Nov 27 00:01:02 crc kubenswrapper[4903]: I1127 00:01:02.796387 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2_5b5ab3d3-0223-4b0f-ab25-785af487d360/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:01:03 crc kubenswrapper[4903]: I1127 00:01:03.498170 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_47c0a41f-61f3-4e6c-8367-a25c5a75d02b/tempest-tests-tempest-tests-runner/0.log" Nov 27 00:01:03 crc kubenswrapper[4903]: I1127 00:01:03.688638 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29403361-ffwm8" event={"ID":"bed02ce7-86ba-4f32-aa9d-5517cca15371","Type":"ContainerStarted","Data":"4d6393353bccf1612f64cb2b0fe838594142dbfee8fba893bc79596985844635"} Nov 27 00:01:03 crc kubenswrapper[4903]: I1127 00:01:03.693759 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29403361-vv6q4" event={"ID":"d8ce0598-1d46-4ddb-b383-4b66b3296d4b","Type":"ContainerStarted","Data":"f2197e908abbc6ba94e1686ba25e99a925f72686a5e436b97eb84e88f9515488"} Nov 27 00:01:03 crc kubenswrapper[4903]: I1127 00:01:03.729500 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-purge-29403361-ffwm8" podStartSLOduration=3.7294778969999998 podStartE2EDuration="3.729477897s" podCreationTimestamp="2025-11-27 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:01:03.708027674 +0000 UTC m=+5992.398262584" watchObservedRunningTime="2025-11-27 00:01:03.729477897 +0000 UTC m=+5992.419712797" Nov 27 00:01:03 crc kubenswrapper[4903]: I1127 00:01:03.754537 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-purge-29403361-vv6q4" podStartSLOduration=3.754517336 podStartE2EDuration="3.754517336s" podCreationTimestamp="2025-11-27 00:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:01:03.723165689 +0000 UTC m=+5992.413400599" watchObservedRunningTime="2025-11-27 00:01:03.754517336 +0000 UTC m=+5992.444752246" Nov 27 00:01:05 crc kubenswrapper[4903]: I1127 00:01:05.718616 4903 generic.go:334] "Generic (PLEG): container finished" podID="d8ce0598-1d46-4ddb-b383-4b66b3296d4b" containerID="f2197e908abbc6ba94e1686ba25e99a925f72686a5e436b97eb84e88f9515488" exitCode=0 Nov 27 00:01:05 crc kubenswrapper[4903]: I1127 00:01:05.718961 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29403361-vv6q4" event={"ID":"d8ce0598-1d46-4ddb-b383-4b66b3296d4b","Type":"ContainerDied","Data":"f2197e908abbc6ba94e1686ba25e99a925f72686a5e436b97eb84e88f9515488"} Nov 27 00:01:06 crc kubenswrapper[4903]: I1127 00:01:06.734465 4903 generic.go:334] "Generic (PLEG): container finished" podID="ce8e3d84-5904-40d3-99fd-0847d2f205f1" containerID="7358df8659b7dd8a4b597c424ca45a60133f0de0a454c734b2670908fcb5f50f" exitCode=0 Nov 27 00:01:06 crc kubenswrapper[4903]: I1127 00:01:06.734599 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403361-mfzw7" 
event={"ID":"ce8e3d84-5904-40d3-99fd-0847d2f205f1","Type":"ContainerDied","Data":"7358df8659b7dd8a4b597c424ca45a60133f0de0a454c734b2670908fcb5f50f"} Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.140913 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.233451 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle\") pod \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.233806 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d5vs\" (UniqueName: \"kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs\") pod \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.233889 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data\") pod \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.233905 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data\") pod \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\" (UID: \"d8ce0598-1d46-4ddb-b383-4b66b3296d4b\") " Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.239244 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data" (OuterVolumeSpecName: "db-purge-config-data") pod "d8ce0598-1d46-4ddb-b383-4b66b3296d4b" (UID: "d8ce0598-1d46-4ddb-b383-4b66b3296d4b"). InnerVolumeSpecName "db-purge-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.243818 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs" (OuterVolumeSpecName: "kube-api-access-5d5vs") pod "d8ce0598-1d46-4ddb-b383-4b66b3296d4b" (UID: "d8ce0598-1d46-4ddb-b383-4b66b3296d4b"). InnerVolumeSpecName "kube-api-access-5d5vs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.270881 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data" (OuterVolumeSpecName: "config-data") pod "d8ce0598-1d46-4ddb-b383-4b66b3296d4b" (UID: "d8ce0598-1d46-4ddb-b383-4b66b3296d4b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.287018 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8ce0598-1d46-4ddb-b383-4b66b3296d4b" (UID: "d8ce0598-1d46-4ddb-b383-4b66b3296d4b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.337780 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d5vs\" (UniqueName: \"kubernetes.io/projected/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-kube-api-access-5d5vs\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.338002 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.338074 4903 reconciler_common.go:293] "Volume detached for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-db-purge-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.338135 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ce0598-1d46-4ddb-b383-4b66b3296d4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.745963 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-purge-29403361-vv6q4" event={"ID":"d8ce0598-1d46-4ddb-b383-4b66b3296d4b","Type":"ContainerDied","Data":"40997955b76304a45379460d8cd7125ecaac74e9e72e43fa4adaa742425fa5df"} Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.747211 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40997955b76304a45379460d8cd7125ecaac74e9e72e43fa4adaa742425fa5df" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.747403 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-purge-29403361-vv6q4" Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.752333 4903 generic.go:334] "Generic (PLEG): container finished" podID="bed02ce7-86ba-4f32-aa9d-5517cca15371" containerID="4d6393353bccf1612f64cb2b0fe838594142dbfee8fba893bc79596985844635" exitCode=0 Nov 27 00:01:07 crc kubenswrapper[4903]: I1127 00:01:07.752514 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29403361-ffwm8" event={"ID":"bed02ce7-86ba-4f32-aa9d-5517cca15371","Type":"ContainerDied","Data":"4d6393353bccf1612f64cb2b0fe838594142dbfee8fba893bc79596985844635"} Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.279937 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.357105 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys\") pod \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.357174 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle\") pod \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.357286 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwkmb\" (UniqueName: \"kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb\") pod \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.357311 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data\") pod \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\" (UID: \"ce8e3d84-5904-40d3-99fd-0847d2f205f1\") " Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.365938 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb" (OuterVolumeSpecName: "kube-api-access-fwkmb") pod "ce8e3d84-5904-40d3-99fd-0847d2f205f1" (UID: "ce8e3d84-5904-40d3-99fd-0847d2f205f1"). InnerVolumeSpecName "kube-api-access-fwkmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.373274 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ce8e3d84-5904-40d3-99fd-0847d2f205f1" (UID: "ce8e3d84-5904-40d3-99fd-0847d2f205f1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.403828 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce8e3d84-5904-40d3-99fd-0847d2f205f1" (UID: "ce8e3d84-5904-40d3-99fd-0847d2f205f1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.459434 4903 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.459463 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.459474 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwkmb\" (UniqueName: \"kubernetes.io/projected/ce8e3d84-5904-40d3-99fd-0847d2f205f1-kube-api-access-fwkmb\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.476230 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data" (OuterVolumeSpecName: "config-data") pod "ce8e3d84-5904-40d3-99fd-0847d2f205f1" (UID: "ce8e3d84-5904-40d3-99fd-0847d2f205f1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.561070 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ce8e3d84-5904-40d3-99fd-0847d2f205f1-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.596970 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6fd08b11-1328-47a3-82a3-286d70df4394/memcached/0.log" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.763022 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403361-mfzw7" Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.763023 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403361-mfzw7" event={"ID":"ce8e3d84-5904-40d3-99fd-0847d2f205f1","Type":"ContainerDied","Data":"8dc1db90ac17511240d7354fc1d3157bd8c9e2b5ae16324f215d33a541eae396"} Nov 27 00:01:08 crc kubenswrapper[4903]: I1127 00:01:08.763402 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dc1db90ac17511240d7354fc1d3157bd8c9e2b5ae16324f215d33a541eae396" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.774119 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-purge-29403361-ffwm8" event={"ID":"bed02ce7-86ba-4f32-aa9d-5517cca15371","Type":"ContainerDied","Data":"28496b6e1fdd3ebde3066dbb719c36301ef305a8429c4171b4dae6a1212571c9"} Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.774156 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28496b6e1fdd3ebde3066dbb719c36301ef305a8429c4171b4dae6a1212571c9" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.845269 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.885036 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data\") pod \"bed02ce7-86ba-4f32-aa9d-5517cca15371\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.885117 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f95xs\" (UniqueName: \"kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs\") pod \"bed02ce7-86ba-4f32-aa9d-5517cca15371\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.885149 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data\") pod \"bed02ce7-86ba-4f32-aa9d-5517cca15371\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.885177 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle\") pod \"bed02ce7-86ba-4f32-aa9d-5517cca15371\" (UID: \"bed02ce7-86ba-4f32-aa9d-5517cca15371\") " Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.898004 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data" (OuterVolumeSpecName: "db-purge-config-data") pod "bed02ce7-86ba-4f32-aa9d-5517cca15371" (UID: "bed02ce7-86ba-4f32-aa9d-5517cca15371"). InnerVolumeSpecName "db-purge-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.903046 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs" (OuterVolumeSpecName: "kube-api-access-f95xs") pod "bed02ce7-86ba-4f32-aa9d-5517cca15371" (UID: "bed02ce7-86ba-4f32-aa9d-5517cca15371"). InnerVolumeSpecName "kube-api-access-f95xs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.936849 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data" (OuterVolumeSpecName: "config-data") pod "bed02ce7-86ba-4f32-aa9d-5517cca15371" (UID: "bed02ce7-86ba-4f32-aa9d-5517cca15371"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.940137 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bed02ce7-86ba-4f32-aa9d-5517cca15371" (UID: "bed02ce7-86ba-4f32-aa9d-5517cca15371"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.988150 4903 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.988379 4903 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.988463 4903 reconciler_common.go:293] "Volume detached for volume \"db-purge-config-data\" (UniqueName: \"kubernetes.io/secret/bed02ce7-86ba-4f32-aa9d-5517cca15371-db-purge-config-data\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:09 crc kubenswrapper[4903]: I1127 00:01:09.988535 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f95xs\" (UniqueName: \"kubernetes.io/projected/bed02ce7-86ba-4f32-aa9d-5517cca15371-kube-api-access-f95xs\") on node \"crc\" DevicePath \"\"" Nov 27 00:01:10 crc kubenswrapper[4903]: I1127 00:01:10.784146 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-purge-29403361-ffwm8" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.535377 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.807765 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.822017 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.838955 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.972188 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.981314 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.981362 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.981405 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.982241 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 27 00:01:31 crc kubenswrapper[4903]: I1127 00:01:31.982296 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" gracePeriod=600 Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.011057 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.043865 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/extract/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: E1127 00:01:32.105601 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.195266 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/manager/3.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.205668 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/kube-rbac-proxy/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.232150 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/manager/2.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.394436 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/kube-rbac-proxy/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.405535 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/manager/3.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.466602 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/manager/2.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.588551 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/manager/2.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.592931 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/kube-rbac-proxy/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.662657 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/manager/1.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.763758 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/kube-rbac-proxy/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.774145 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/manager/3.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.846959 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/manager/2.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.928988 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/kube-rbac-proxy/0.log" Nov 27 00:01:32 crc kubenswrapper[4903]: I1127 00:01:32.987269 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/manager/3.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.017255 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/manager/2.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.040826 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" exitCode=0 Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.040878 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"} Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.040923 4903 scope.go:117] "RemoveContainer" containerID="d8d408e8b27dccccb582ea57a07f095c78cb127762f8f697c02db973a850b725" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.041663 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:01:33 crc kubenswrapper[4903]: E1127 00:01:33.041967 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.158365 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/kube-rbac-proxy/0.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.191700 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/manager/3.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.262211 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/manager/2.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.335522 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/kube-rbac-proxy/0.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.413035 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/manager/2.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.462725 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/manager/1.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.531803 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/kube-rbac-proxy/0.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.604494 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/manager/3.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.665454 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/manager/2.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.732319 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/kube-rbac-proxy/0.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.802992 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/manager/3.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.868447 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/manager/2.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.940391 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/kube-rbac-proxy/0.log" Nov 27 00:01:33 crc kubenswrapper[4903]: I1127 00:01:33.976137 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/manager/3.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.057270 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/manager/2.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.134549 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/manager/2.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.159367 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/kube-rbac-proxy/0.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.255509 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/manager/1.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.355673 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/kube-rbac-proxy/0.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.362569 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/manager/3.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.455889 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/manager/2.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.533449 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/kube-rbac-proxy/0.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.596896 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/manager/3.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.652166 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/manager/2.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.721170 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/kube-rbac-proxy/0.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.808955 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/manager/3.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.853953 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/manager/2.log" Nov 27 00:01:34 crc kubenswrapper[4903]: I1127 00:01:34.920279 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/kube-rbac-proxy/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.036740 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/manager/1.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.041766 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/manager/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.228584 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5467d974c6-lpj77_9239ccfa-cbaa-44b2-a70f-94a281d885f6/manager/1.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.373148 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5bd96487c4-8k4kq_651c7100-bdd0-41e2-8a7f-eaab13dfd391/operator/1.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.447466 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-zljmf_a348b7af-eb1c-4c45-8611-9a37a4ee9ac7/registry-server/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.530980 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5bd96487c4-8k4kq_651c7100-bdd0-41e2-8a7f-eaab13dfd391/operator/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.596597 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/kube-rbac-proxy/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.652493 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/manager/3.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.740103 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5467d974c6-lpj77_9239ccfa-cbaa-44b2-a70f-94a281d885f6/manager/2.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.763662 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/manager/2.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.861315 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/kube-rbac-proxy/0.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.878138 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/manager/3.log" Nov 27 00:01:35 crc kubenswrapper[4903]: I1127 00:01:35.933360 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/manager/2.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.002482 4903 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fzd8p_8248a160-f606-4eaa-9bc1-0e7fcc1ab852/operator/2.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.086150 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fzd8p_8248a160-f606-4eaa-9bc1-0e7fcc1ab852/operator/1.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.104863 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/kube-rbac-proxy/0.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.158221 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/manager/3.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.193294 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/manager/2.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.249069 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/kube-rbac-proxy/0.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.339307 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/manager/1.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.368589 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/manager/2.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.490626 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/manager/1.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.499948 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/kube-rbac-proxy/0.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.504352 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/manager/0.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.629109 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/kube-rbac-proxy/0.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.672578 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/manager/2.log" Nov 27 00:01:36 crc kubenswrapper[4903]: I1127 00:01:36.690618 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/manager/1.log" Nov 27 00:01:48 crc kubenswrapper[4903]: I1127 00:01:48.029548 4903 scope.go:117] 
"RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:01:48 crc kubenswrapper[4903]: E1127 00:01:48.030477 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:01:56 crc kubenswrapper[4903]: I1127 00:01:56.647945 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-zb6l2_72c47664-999f-45b2-b047-184bdc7d8c58/control-plane-machine-set-operator/0.log" Nov 27 00:01:56 crc kubenswrapper[4903]: I1127 00:01:56.806720 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x242l_27420cfc-cc8f-4482-9206-706ab7bf9430/kube-rbac-proxy/0.log" Nov 27 00:01:56 crc kubenswrapper[4903]: I1127 00:01:56.844951 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x242l_27420cfc-cc8f-4482-9206-706ab7bf9430/machine-api-operator/0.log" Nov 27 00:02:01 crc kubenswrapper[4903]: I1127 00:02:01.029015 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:02:01 crc kubenswrapper[4903]: E1127 00:02:01.030025 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:02:10 crc kubenswrapper[4903]: I1127 00:02:10.347884 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-qdk8j_de11c064-60b1-4f96-a316-bc903f061766/cert-manager-controller/1.log" Nov 27 00:02:10 crc kubenswrapper[4903]: I1127 00:02:10.569263 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-qdk8j_de11c064-60b1-4f96-a316-bc903f061766/cert-manager-controller/0.log" Nov 27 00:02:10 crc kubenswrapper[4903]: I1127 00:02:10.759142 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dfvzf_580a58c8-ce17-4d85-991a-e51d3eb639b3/cert-manager-cainjector/1.log" Nov 27 00:02:10 crc kubenswrapper[4903]: I1127 00:02:10.767798 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dfvzf_580a58c8-ce17-4d85-991a-e51d3eb639b3/cert-manager-cainjector/0.log" Nov 27 00:02:10 crc kubenswrapper[4903]: I1127 00:02:10.893602 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-zz98c_da38aa1b-878d-476e-b742-7329a813bf99/cert-manager-webhook/0.log" Nov 27 00:02:16 crc kubenswrapper[4903]: I1127 00:02:16.028583 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:02:16 crc kubenswrapper[4903]: E1127 00:02:16.029683 4903 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:02:26 crc kubenswrapper[4903]: I1127 00:02:26.630437 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-hpzbl_872167df-4435-42c4-9503-8bfca809574f/nmstate-console-plugin/0.log" Nov 27 00:02:26 crc kubenswrapper[4903]: I1127 00:02:26.724731 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-v5kzk_097195ec-5a3f-4d57-b864-264165398ff6/nmstate-handler/0.log" Nov 27 00:02:26 crc kubenswrapper[4903]: I1127 00:02:26.871656 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-ctqlb_500a4a0f-2474-482b-9f47-7304d9bd35e9/kube-rbac-proxy/0.log" Nov 27 00:02:26 crc kubenswrapper[4903]: I1127 00:02:26.873104 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-ctqlb_500a4a0f-2474-482b-9f47-7304d9bd35e9/nmstate-metrics/0.log" Nov 27 00:02:27 crc kubenswrapper[4903]: I1127 00:02:27.031897 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-btnn5_f0eef496-9727-4ee7-9c31-c2afcb9303c6/nmstate-operator/0.log" Nov 27 00:02:27 crc kubenswrapper[4903]: I1127 00:02:27.060567 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-9rhdl_8ebd57a0-155f-481c-9d2e-11c69e14b6fc/nmstate-webhook/0.log" Nov 27 00:02:28 crc kubenswrapper[4903]: I1127 00:02:28.029264 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:02:28 crc kubenswrapper[4903]: E1127 00:02:28.029641 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:02:40 crc kubenswrapper[4903]: I1127 00:02:40.029264 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:02:40 crc kubenswrapper[4903]: E1127 00:02:40.030090 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:02:42 crc kubenswrapper[4903]: I1127 00:02:42.056732 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/kube-rbac-proxy/0.log" Nov 27 00:02:42 crc kubenswrapper[4903]: I1127 00:02:42.108759 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/1.log" Nov 27 00:02:42 crc kubenswrapper[4903]: I1127 00:02:42.495548 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/0.log" Nov 27 00:02:51 crc kubenswrapper[4903]: I1127 00:02:51.029077 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:02:51 crc kubenswrapper[4903]: E1127 00:02:51.030028 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:02:57 crc kubenswrapper[4903]: I1127 00:02:57.848987 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-xslxr_a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354/cluster-logging-operator/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.017482 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-xj985_49456ff3-4275-428d-84cc-25664a331100/collector/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.184800 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_0cb383d8-296b-4298-8f2f-28edb1f1278f/loki-compactor/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.209431 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-htm5b_546d4145-a63b-4664-86d0-9ce432670a7b/loki-distributor/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.366938 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-g4fbs_16a3e6c0-118c-4827-b39b-d9a59d959fec/gateway/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.388637 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-g4fbs_16a3e6c0-118c-4827-b39b-d9a59d959fec/opa/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.483767 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-tvgns_64e0c0a9-13e7-4f0b-989d-8f217958cd92/gateway/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.547880 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-tvgns_64e0c0a9-13e7-4f0b-989d-8f217958cd92/opa/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.608160 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_d4ca2376-fa84-4a6c-b47b-3661bacfd578/loki-index-gateway/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.797655 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_23d0313e-2bdb-4054-8951-2e29fd19f371/loki-ingester/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.811620 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-k45nx_138798d6-77b9-4e20-970b-d83e0378e667/loki-querier/0.log" Nov 27 00:02:58 crc kubenswrapper[4903]: I1127 00:02:58.970920 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-52tt7_9fb3c717-adf5-483c-9d16-6d47d489a5e1/loki-query-frontend/0.log" Nov 27 00:03:06 crc kubenswrapper[4903]: I1127 00:03:06.029872 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:03:06 crc kubenswrapper[4903]: E1127 00:03:06.031462 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.372585 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hw6vb_a9e12d32-ef72-446c-b317-8d00a90a651b/kube-rbac-proxy/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.506157 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hw6vb_a9e12d32-ef72-446c-b317-8d00a90a651b/controller/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.561256 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.765515 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.806032 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.812573 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.839079 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:03:14 crc kubenswrapper[4903]: I1127 00:03:14.981856 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.010121 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.013747 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.066835 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.216987 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.239161 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.271704 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.274766 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/controller/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.424997 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/frr-metrics/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.483284 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/kube-rbac-proxy-frr/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.502426 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/kube-rbac-proxy/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.656590 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/reloader/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.786910 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-s9t6m_5317f83c-9fcf-4df1-9823-bb92767545a9/frr-k8s-webhook-server/0.log" Nov 27 00:03:15 crc kubenswrapper[4903]: I1127 00:03:15.952175 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57594f7c4c-gdzqb_b5900302-4880-4732-a477-8ed6cf3bfec3/manager/3.log" Nov 27 00:03:16 crc kubenswrapper[4903]: I1127 00:03:16.128425 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57594f7c4c-gdzqb_b5900302-4880-4732-a477-8ed6cf3bfec3/manager/2.log" Nov 27 00:03:16 crc kubenswrapper[4903]: I1127 00:03:16.191757 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-d76ff59f5-8bdc9_1c731f8b-9333-4076-b193-54255a31e938/webhook-server/0.log" Nov 27 00:03:16 crc kubenswrapper[4903]: I1127 00:03:16.409993 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f2g89_61e82f3d-2aca-46e7-bd0f-12c8b492c14e/kube-rbac-proxy/0.log" Nov 27 00:03:17 crc kubenswrapper[4903]: I1127 00:03:17.085893 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f2g89_61e82f3d-2aca-46e7-bd0f-12c8b492c14e/speaker/0.log" Nov 27 00:03:17 crc kubenswrapper[4903]: I1127 00:03:17.336196 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/frr/0.log" Nov 27 00:03:21 crc kubenswrapper[4903]: I1127 00:03:21.029309 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:03:21 crc kubenswrapper[4903]: E1127 00:03:21.030198 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:03:29 crc kubenswrapper[4903]: I1127 00:03:29.569929 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:03:29 crc kubenswrapper[4903]: I1127 00:03:29.825905 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:03:29 crc kubenswrapper[4903]: I1127 00:03:29.838661 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:03:29 crc kubenswrapper[4903]: I1127 00:03:29.908577 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.070418 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.072121 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.099530 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/extract/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.235722 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.405197 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.415450 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.436171 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.601956 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.634236 4903 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.636253 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/extract/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.760603 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.965663 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.971822 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:03:30 crc kubenswrapper[4903]: I1127 00:03:30.999894 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.140068 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.140991 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.183499 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/extract/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.333785 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.468435 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.476115 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.482168 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.653118 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.658075 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/extract/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.684965 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.810242 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.948439 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.961279 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:03:31 crc kubenswrapper[4903]: I1127 00:03:31.978984 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.036117 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:03:32 crc kubenswrapper[4903]: E1127 00:03:32.036428 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.133429 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.190477 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.393429 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.448235 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/registry-server/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.619173 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.638259 4903 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.638532 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.825119 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:03:32 crc kubenswrapper[4903]: I1127 00:03:32.838494 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.045254 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.282857 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.285988 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.288076 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.455385 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/registry-server/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.466835 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.486484 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.537348 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/extract/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.629520 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:33 crc kubenswrapper[4903]: E1127 00:03:33.630278 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8ce0598-1d46-4ddb-b383-4b66b3296d4b" containerName="glance-dbpurge" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630307 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8ce0598-1d46-4ddb-b383-4b66b3296d4b" containerName="glance-dbpurge" Nov 27 00:03:33 crc kubenswrapper[4903]: 
E1127 00:03:33.630351 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed02ce7-86ba-4f32-aa9d-5517cca15371" containerName="cinder-db-purge" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630360 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="bed02ce7-86ba-4f32-aa9d-5517cca15371" containerName="cinder-db-purge" Nov 27 00:03:33 crc kubenswrapper[4903]: E1127 00:03:33.630389 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce8e3d84-5904-40d3-99fd-0847d2f205f1" containerName="keystone-cron" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630399 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce8e3d84-5904-40d3-99fd-0847d2f205f1" containerName="keystone-cron" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630683 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8ce0598-1d46-4ddb-b383-4b66b3296d4b" containerName="glance-dbpurge" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630730 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed02ce7-86ba-4f32-aa9d-5517cca15371" containerName="cinder-db-purge" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.630778 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce8e3d84-5904-40d3-99fd-0847d2f205f1" containerName="keystone-cron" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.632947 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.644523 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.683553 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-f9lv7_ad2be713-f117-46a7-a491-d75a9564cd48/marketplace-operator/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.687432 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.798791 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.798874 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6nwb\" (UniqueName: \"kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.798942 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.895870 4903 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.901259 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.901417 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.901471 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6nwb\" (UniqueName: \"kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.902253 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.902408 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.902641 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.924559 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6nwb\" (UniqueName: \"kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb\") pod \"certified-operators-lktm4\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.949408 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:03:33 crc kubenswrapper[4903]: I1127 00:03:33.966261 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.368027 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.441724 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.469037 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-utilities/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.477596 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/registry-server/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.602716 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-utilities/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.624102 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-content/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.647647 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-content/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.650645 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.690544 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerStarted","Data":"e17aca9e0f722dccac2d2153a644e0055c531f89dc12b41c417b2a9c30c7b786"} Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.815259 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-content/0.log" Nov 27 00:03:34 crc kubenswrapper[4903]: I1127 00:03:34.815300 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/extract-utilities/0.log" Nov 27 00:03:35 crc kubenswrapper[4903]: I1127 00:03:35.622138 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-khkfx_ca8619ad-8673-4b83-907d-e274c4cd11ac/registry-server/0.log" Nov 27 00:03:35 crc kubenswrapper[4903]: I1127 00:03:35.701805 4903 generic.go:334] "Generic (PLEG): container finished" podID="162c1054-724a-4caf-b0eb-315b93e441e5" containerID="93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e" exitCode=0 Nov 27 00:03:35 crc kubenswrapper[4903]: I1127 00:03:35.701979 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerDied","Data":"93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e"} Nov 27 00:03:37 crc kubenswrapper[4903]: I1127 00:03:37.733942 4903 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerStarted","Data":"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7"} Nov 27 00:03:38 crc kubenswrapper[4903]: I1127 00:03:38.748394 4903 generic.go:334] "Generic (PLEG): container finished" podID="162c1054-724a-4caf-b0eb-315b93e441e5" containerID="bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7" exitCode=0 Nov 27 00:03:38 crc kubenswrapper[4903]: I1127 00:03:38.748498 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerDied","Data":"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7"} Nov 27 00:03:39 crc kubenswrapper[4903]: I1127 00:03:39.764195 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerStarted","Data":"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194"} Nov 27 00:03:39 crc kubenswrapper[4903]: I1127 00:03:39.822826 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lktm4" podStartSLOduration=3.332005113 podStartE2EDuration="6.822799985s" podCreationTimestamp="2025-11-27 00:03:33 +0000 UTC" firstStartedPulling="2025-11-27 00:03:35.703633804 +0000 UTC m=+6144.393868714" lastFinishedPulling="2025-11-27 00:03:39.194428676 +0000 UTC m=+6147.884663586" observedRunningTime="2025-11-27 00:03:39.785359128 +0000 UTC m=+6148.475594048" watchObservedRunningTime="2025-11-27 00:03:39.822799985 +0000 UTC m=+6148.513034905" Nov 27 00:03:43 crc kubenswrapper[4903]: I1127 00:03:43.968262 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:43 crc kubenswrapper[4903]: I1127 00:03:43.968860 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:44 crc kubenswrapper[4903]: I1127 00:03:44.024881 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:44 crc kubenswrapper[4903]: I1127 00:03:44.910890 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:44 crc kubenswrapper[4903]: I1127 00:03:44.960855 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:46 crc kubenswrapper[4903]: I1127 00:03:46.028363 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:03:46 crc kubenswrapper[4903]: E1127 00:03:46.029026 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:03:46 crc kubenswrapper[4903]: I1127 00:03:46.847596 4903 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-lktm4" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="registry-server" containerID="cri-o://7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194" gracePeriod=2 Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.378497 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.414878 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6nwb\" (UniqueName: \"kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb\") pod \"162c1054-724a-4caf-b0eb-315b93e441e5\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.415027 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities\") pod \"162c1054-724a-4caf-b0eb-315b93e441e5\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.415066 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content\") pod \"162c1054-724a-4caf-b0eb-315b93e441e5\" (UID: \"162c1054-724a-4caf-b0eb-315b93e441e5\") " Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.417534 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities" (OuterVolumeSpecName: "utilities") pod "162c1054-724a-4caf-b0eb-315b93e441e5" (UID: "162c1054-724a-4caf-b0eb-315b93e441e5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.424912 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb" (OuterVolumeSpecName: "kube-api-access-k6nwb") pod "162c1054-724a-4caf-b0eb-315b93e441e5" (UID: "162c1054-724a-4caf-b0eb-315b93e441e5"). InnerVolumeSpecName "kube-api-access-k6nwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.511781 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "162c1054-724a-4caf-b0eb-315b93e441e5" (UID: "162c1054-724a-4caf-b0eb-315b93e441e5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.517889 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-utilities\") on node \"crc\" DevicePath \"\"" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.517928 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/162c1054-724a-4caf-b0eb-315b93e441e5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.517939 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6nwb\" (UniqueName: \"kubernetes.io/projected/162c1054-724a-4caf-b0eb-315b93e441e5-kube-api-access-k6nwb\") on node \"crc\" DevicePath \"\"" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.860886 4903 generic.go:334] "Generic (PLEG): container finished" podID="162c1054-724a-4caf-b0eb-315b93e441e5" containerID="7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194" exitCode=0 Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.860925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerDied","Data":"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194"} Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.860948 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lktm4" event={"ID":"162c1054-724a-4caf-b0eb-315b93e441e5","Type":"ContainerDied","Data":"e17aca9e0f722dccac2d2153a644e0055c531f89dc12b41c417b2a9c30c7b786"} Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.860960 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lktm4" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.860964 4903 scope.go:117] "RemoveContainer" containerID="7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.889017 4903 scope.go:117] "RemoveContainer" containerID="bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.903875 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.915010 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lktm4"] Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.916900 4903 scope.go:117] "RemoveContainer" containerID="93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.976514 4903 scope.go:117] "RemoveContainer" containerID="7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194" Nov 27 00:03:47 crc kubenswrapper[4903]: E1127 00:03:47.977018 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194\": container with ID starting with 7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194 not found: ID does not exist" containerID="7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.977047 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194"} err="failed to get container status \"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194\": rpc error: code = NotFound desc = could not find container \"7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194\": container with ID starting with 7f075944ee96c266941818a402ae2092371d791f2f6efdefc5f55ceec8a0f194 not found: ID does not exist" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.977068 4903 scope.go:117] "RemoveContainer" containerID="bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7" Nov 27 00:03:47 crc kubenswrapper[4903]: E1127 00:03:47.977385 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7\": container with ID starting with bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7 not found: ID does not exist" containerID="bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.977411 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7"} err="failed to get container status \"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7\": rpc error: code = NotFound desc = could not find container \"bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7\": container with ID starting with bed91311678175ba6ee1e1669eb7bf35b7def8b1671c0f40301a52a775b414e7 not found: ID does not exist" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.977424 4903 scope.go:117] "RemoveContainer" 
containerID="93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e" Nov 27 00:03:47 crc kubenswrapper[4903]: E1127 00:03:47.977727 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e\": container with ID starting with 93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e not found: ID does not exist" containerID="93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e" Nov 27 00:03:47 crc kubenswrapper[4903]: I1127 00:03:47.977796 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e"} err="failed to get container status \"93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e\": rpc error: code = NotFound desc = could not find container \"93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e\": container with ID starting with 93150759549f13fe4ed6d11c6e3de1c3f7dcbc56bfde6187cff4bc958c2a6f9e not found: ID does not exist" Nov 27 00:03:48 crc kubenswrapper[4903]: I1127 00:03:48.055615 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" path="/var/lib/kubelet/pods/162c1054-724a-4caf-b0eb-315b93e441e5/volumes" Nov 27 00:03:48 crc kubenswrapper[4903]: I1127 00:03:48.652506 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-kfn8d_5127cf5c-29a6-484d-9e1c-895e2bb109e3/prometheus-operator/0.log" Nov 27 00:03:48 crc kubenswrapper[4903]: I1127 00:03:48.767795 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_68d1419a-288f-4fcb-9d4d-8f9568fa2170/prometheus-operator-admission-webhook/0.log" Nov 27 00:03:48 crc kubenswrapper[4903]: I1127 00:03:48.863002 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_7674d75c-8272-4f53-86fe-3fb83d421c63/prometheus-operator-admission-webhook/0.log" Nov 27 00:03:48 crc kubenswrapper[4903]: I1127 00:03:48.987047 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-6gzsx_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471/operator/0.log" Nov 27 00:03:49 crc kubenswrapper[4903]: I1127 00:03:49.109656 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-ccgq6_dff09e4b-a38e-43fa-8394-e6922e356c4d/observability-ui-dashboards/0.log" Nov 27 00:03:49 crc kubenswrapper[4903]: I1127 00:03:49.222997 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-clswb_193e7d1c-0f98-4013-aad9-16711a00ab2e/perses-operator/0.log" Nov 27 00:03:58 crc kubenswrapper[4903]: I1127 00:03:58.028850 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:03:58 crc kubenswrapper[4903]: E1127 00:03:58.029618 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:04:02 crc kubenswrapper[4903]: I1127 00:04:02.336024 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/kube-rbac-proxy/0.log" Nov 27 00:04:02 crc kubenswrapper[4903]: I1127 00:04:02.425953 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/1.log" Nov 27 00:04:02 crc kubenswrapper[4903]: I1127 00:04:02.480144 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/0.log" Nov 27 00:04:11 crc kubenswrapper[4903]: I1127 00:04:11.029390 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:04:11 crc kubenswrapper[4903]: E1127 00:04:11.030335 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.916478 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:19 crc kubenswrapper[4903]: E1127 00:04:19.917378 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="extract-utilities" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.917391 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="extract-utilities" Nov 27 00:04:19 crc kubenswrapper[4903]: E1127 00:04:19.917403 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="extract-content" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.917410 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="extract-content" Nov 27 00:04:19 crc kubenswrapper[4903]: E1127 00:04:19.917443 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="registry-server" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.917449 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="registry-server" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.917734 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="162c1054-724a-4caf-b0eb-315b93e441e5" containerName="registry-server" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.919389 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:19 crc kubenswrapper[4903]: I1127 00:04:19.937263 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.026218 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.026310 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n4v9\" (UniqueName: \"kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.026448 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.129381 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.129529 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n4v9\" (UniqueName: \"kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.129896 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.131366 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.132554 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.167677 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7n4v9\" (UniqueName: \"kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9\") pod \"community-operators-cqx62\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:20 crc kubenswrapper[4903]: I1127 00:04:20.259495 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:21 crc kubenswrapper[4903]: I1127 00:04:21.142757 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:21 crc kubenswrapper[4903]: I1127 00:04:21.272159 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerStarted","Data":"7d61902d4d843983e1964177a8c338a3bf0fda0b1cff7dc66b9a03b74671bae4"} Nov 27 00:04:22 crc kubenswrapper[4903]: I1127 00:04:22.284190 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerID="2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7" exitCode=0 Nov 27 00:04:22 crc kubenswrapper[4903]: I1127 00:04:22.285933 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerDied","Data":"2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7"} Nov 27 00:04:24 crc kubenswrapper[4903]: I1127 00:04:24.309733 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerStarted","Data":"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046"} Nov 27 00:04:25 crc kubenswrapper[4903]: E1127 00:04:25.022418 4903 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3a3f026_c4b0_4548_b126_2c1b5ef94597.slice/crio-conmon-186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3a3f026_c4b0_4548_b126_2c1b5ef94597.slice/crio-186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046.scope\": RecentStats: unable to find data in memory cache]" Nov 27 00:04:25 crc kubenswrapper[4903]: I1127 00:04:25.029510 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:04:25 crc kubenswrapper[4903]: E1127 00:04:25.029760 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:04:25 crc kubenswrapper[4903]: I1127 00:04:25.321332 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerID="186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046" exitCode=0 Nov 27 00:04:25 crc kubenswrapper[4903]: I1127 00:04:25.321393 4903 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerDied","Data":"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046"} Nov 27 00:04:26 crc kubenswrapper[4903]: I1127 00:04:26.339349 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerStarted","Data":"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6"} Nov 27 00:04:26 crc kubenswrapper[4903]: I1127 00:04:26.367594 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cqx62" podStartSLOduration=3.602881707 podStartE2EDuration="7.367572045s" podCreationTimestamp="2025-11-27 00:04:19 +0000 UTC" firstStartedPulling="2025-11-27 00:04:22.28993344 +0000 UTC m=+6190.980168370" lastFinishedPulling="2025-11-27 00:04:26.054623798 +0000 UTC m=+6194.744858708" observedRunningTime="2025-11-27 00:04:26.356548121 +0000 UTC m=+6195.046783041" watchObservedRunningTime="2025-11-27 00:04:26.367572045 +0000 UTC m=+6195.057806955" Nov 27 00:04:30 crc kubenswrapper[4903]: I1127 00:04:30.260127 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:30 crc kubenswrapper[4903]: I1127 00:04:30.260793 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:30 crc kubenswrapper[4903]: I1127 00:04:30.331096 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:38 crc kubenswrapper[4903]: I1127 00:04:38.031310 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:04:38 crc kubenswrapper[4903]: E1127 00:04:38.032062 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:04:40 crc kubenswrapper[4903]: I1127 00:04:40.327231 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:40 crc kubenswrapper[4903]: I1127 00:04:40.420980 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:40 crc kubenswrapper[4903]: I1127 00:04:40.521073 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cqx62" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="registry-server" containerID="cri-o://bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6" gracePeriod=2 Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.097062 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.174242 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities\") pod \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.174738 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content\") pod \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.174831 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n4v9\" (UniqueName: \"kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9\") pod \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\" (UID: \"b3a3f026-c4b0-4548-b126-2c1b5ef94597\") " Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.175011 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities" (OuterVolumeSpecName: "utilities") pod "b3a3f026-c4b0-4548-b126-2c1b5ef94597" (UID: "b3a3f026-c4b0-4548-b126-2c1b5ef94597"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.175495 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-utilities\") on node \"crc\" DevicePath \"\"" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.183836 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9" (OuterVolumeSpecName: "kube-api-access-7n4v9") pod "b3a3f026-c4b0-4548-b126-2c1b5ef94597" (UID: "b3a3f026-c4b0-4548-b126-2c1b5ef94597"). InnerVolumeSpecName "kube-api-access-7n4v9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.244449 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3a3f026-c4b0-4548-b126-2c1b5ef94597" (UID: "b3a3f026-c4b0-4548-b126-2c1b5ef94597"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.278217 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3a3f026-c4b0-4548-b126-2c1b5ef94597-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.278280 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n4v9\" (UniqueName: \"kubernetes.io/projected/b3a3f026-c4b0-4548-b126-2c1b5ef94597-kube-api-access-7n4v9\") on node \"crc\" DevicePath \"\"" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.551565 4903 generic.go:334] "Generic (PLEG): container finished" podID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerID="bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6" exitCode=0 Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.551807 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cqx62" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.551840 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerDied","Data":"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6"} Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.552173 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cqx62" event={"ID":"b3a3f026-c4b0-4548-b126-2c1b5ef94597","Type":"ContainerDied","Data":"7d61902d4d843983e1964177a8c338a3bf0fda0b1cff7dc66b9a03b74671bae4"} Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.552221 4903 scope.go:117] "RemoveContainer" containerID="bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.624736 4903 scope.go:117] "RemoveContainer" containerID="186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.625631 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.649114 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cqx62"] Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.661398 4903 scope.go:117] "RemoveContainer" containerID="2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.702158 4903 scope.go:117] "RemoveContainer" containerID="bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6" Nov 27 00:04:41 crc kubenswrapper[4903]: E1127 00:04:41.702616 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6\": container with ID starting with bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6 not found: ID does not exist" containerID="bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.702657 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6"} err="failed to get container status 
\"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6\": rpc error: code = NotFound desc = could not find container \"bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6\": container with ID starting with bac9e2e40ed55cfeb2029ead206237823f0e4e6aca2fb0671036514942c823b6 not found: ID does not exist" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.702688 4903 scope.go:117] "RemoveContainer" containerID="186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046" Nov 27 00:04:41 crc kubenswrapper[4903]: E1127 00:04:41.703053 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046\": container with ID starting with 186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046 not found: ID does not exist" containerID="186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.703081 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046"} err="failed to get container status \"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046\": rpc error: code = NotFound desc = could not find container \"186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046\": container with ID starting with 186dfe3ee7236c31cabdb9c997622c3e39db81b75beb1af26b16bd772d6ef046 not found: ID does not exist" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.703095 4903 scope.go:117] "RemoveContainer" containerID="2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7" Nov 27 00:04:41 crc kubenswrapper[4903]: E1127 00:04:41.703324 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7\": container with ID starting with 2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7 not found: ID does not exist" containerID="2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7" Nov 27 00:04:41 crc kubenswrapper[4903]: I1127 00:04:41.703349 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7"} err="failed to get container status \"2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7\": rpc error: code = NotFound desc = could not find container \"2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7\": container with ID starting with 2b3893a7fa4b19083710c5ca6ab167656d3aa6f3f09c5a1ea286d1a38856edd7 not found: ID does not exist" Nov 27 00:04:42 crc kubenswrapper[4903]: I1127 00:04:42.041768 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" path="/var/lib/kubelet/pods/b3a3f026-c4b0-4548-b126-2c1b5ef94597/volumes" Nov 27 00:04:49 crc kubenswrapper[4903]: I1127 00:04:49.029151 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:04:49 crc kubenswrapper[4903]: E1127 00:04:49.030381 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 27 00:04:49 crc kubenswrapper[4903]: E1127 00:04:49.030381 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:01 crc kubenswrapper[4903]: I1127 00:05:01.029239 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"
Nov 27 00:05:01 crc kubenswrapper[4903]: E1127 00:05:01.030368 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:16 crc kubenswrapper[4903]: I1127 00:05:16.028798 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"
Nov 27 00:05:16 crc kubenswrapper[4903]: E1127 00:05:16.029840 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:28 crc kubenswrapper[4903]: I1127 00:05:28.645598 4903 scope.go:117] "RemoveContainer" containerID="92bfe1e9d8f564485dbb92dc392fb152e5cc6ea01f94fe4e8aa674c58da5c098"
Nov 27 00:05:30 crc kubenswrapper[4903]: I1127 00:05:30.030461 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"
Nov 27 00:05:30 crc kubenswrapper[4903]: E1127 00:05:30.031261 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:45 crc kubenswrapper[4903]: I1127 00:05:45.028814 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"
Nov 27 00:05:45 crc kubenswrapper[4903]: E1127 00:05:45.029514 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:49 crc kubenswrapper[4903]: I1127 00:05:49.566582 4903 generic.go:334] "Generic (PLEG): container finished" podID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerID="7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4" exitCode=0
Nov 27 00:05:49 crc kubenswrapper[4903]: I1127 00:05:49.566724 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w6swq/must-gather-kczv5" event={"ID":"646193d4-60c2-440c-a998-0ec89db5aaf3","Type":"ContainerDied","Data":"7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"}
Nov 27 00:05:49 crc kubenswrapper[4903]: I1127 00:05:49.568295 4903 scope.go:117] "RemoveContainer" containerID="7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"
Nov 27 00:05:49 crc kubenswrapper[4903]: I1127 00:05:49.846831 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6swq_must-gather-kczv5_646193d4-60c2-440c-a998-0ec89db5aaf3/gather/0.log"
Nov 27 00:05:56 crc kubenswrapper[4903]: I1127 00:05:56.029895 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455"
Nov 27 00:05:56 crc kubenswrapper[4903]: E1127 00:05:56.031028 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.163436 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w6swq/must-gather-kczv5"]
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.164288 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-w6swq/must-gather-kczv5" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="copy" containerID="cri-o://f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8" gracePeriod=2
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.178547 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w6swq/must-gather-kczv5"]
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.669260 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6swq_must-gather-kczv5_646193d4-60c2-440c-a998-0ec89db5aaf3/copy/0.log"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.670033 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/must-gather-kczv5"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.692631 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w6swq_must-gather-kczv5_646193d4-60c2-440c-a998-0ec89db5aaf3/copy/0.log"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.693283 4903 generic.go:334] "Generic (PLEG): container finished" podID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerID="f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8" exitCode=143
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.693337 4903 scope.go:117] "RemoveContainer" containerID="f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.693360 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w6swq/must-gather-kczv5"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.708924 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output\") pod \"646193d4-60c2-440c-a998-0ec89db5aaf3\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") "
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.709015 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6pg4\" (UniqueName: \"kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4\") pod \"646193d4-60c2-440c-a998-0ec89db5aaf3\" (UID: \"646193d4-60c2-440c-a998-0ec89db5aaf3\") "
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.715022 4903 scope.go:117] "RemoveContainer" containerID="7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.716385 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4" (OuterVolumeSpecName: "kube-api-access-k6pg4") pod "646193d4-60c2-440c-a998-0ec89db5aaf3" (UID: "646193d4-60c2-440c-a998-0ec89db5aaf3"). InnerVolumeSpecName "kube-api-access-k6pg4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.814558 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6pg4\" (UniqueName: \"kubernetes.io/projected/646193d4-60c2-440c-a998-0ec89db5aaf3-kube-api-access-k6pg4\") on node \"crc\" DevicePath \"\""
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.832018 4903 scope.go:117] "RemoveContainer" containerID="f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8"
Nov 27 00:05:59 crc kubenswrapper[4903]: E1127 00:05:59.833710 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8\": container with ID starting with f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8 not found: ID does not exist" containerID="f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.833744 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8"} err="failed to get container status \"f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8\": rpc error: code = NotFound desc = could not find container \"f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8\": container with ID starting with f67a7c46625e782ae60275265b652c953d23a3100fa46871dc12c114117c89c8 not found: ID does not exist"
Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.833765 4903 scope.go:117] "RemoveContainer" containerID="7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"
containerID="7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4" Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.834670 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4"} err="failed to get container status \"7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4\": rpc error: code = NotFound desc = could not find container \"7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4\": container with ID starting with 7aeb1ebe605c4495c166b453e0b234ca6f54afce23dad0c0abb39ace008b10d4 not found: ID does not exist" Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.891153 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "646193d4-60c2-440c-a998-0ec89db5aaf3" (UID: "646193d4-60c2-440c-a998-0ec89db5aaf3"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:05:59 crc kubenswrapper[4903]: I1127 00:05:59.916643 4903 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/646193d4-60c2-440c-a998-0ec89db5aaf3-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 27 00:06:00 crc kubenswrapper[4903]: I1127 00:06:00.043847 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" path="/var/lib/kubelet/pods/646193d4-60c2-440c-a998-0ec89db5aaf3/volumes" Nov 27 00:06:11 crc kubenswrapper[4903]: I1127 00:06:11.028922 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:06:11 crc kubenswrapper[4903]: E1127 00:06:11.029768 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:06:26 crc kubenswrapper[4903]: I1127 00:06:26.029232 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:06:26 crc kubenswrapper[4903]: E1127 00:06:26.030347 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:06:28 crc kubenswrapper[4903]: I1127 00:06:28.757243 4903 scope.go:117] "RemoveContainer" containerID="981153dadb312b4461573a685c81a450f81374341369fc7b1e658851975e570e" Nov 27 00:06:41 crc kubenswrapper[4903]: I1127 00:06:41.029084 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:06:42 crc kubenswrapper[4903]: I1127 00:06:42.239091 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" 
event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65"} Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.808649 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tczvh/must-gather-855zj"] Nov 27 00:08:56 crc kubenswrapper[4903]: E1127 00:08:56.813463 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="extract-utilities" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813485 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="extract-utilities" Nov 27 00:08:56 crc kubenswrapper[4903]: E1127 00:08:56.813523 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="extract-content" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813530 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="extract-content" Nov 27 00:08:56 crc kubenswrapper[4903]: E1127 00:08:56.813552 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="copy" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813559 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="copy" Nov 27 00:08:56 crc kubenswrapper[4903]: E1127 00:08:56.813601 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="registry-server" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813607 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="registry-server" Nov 27 00:08:56 crc kubenswrapper[4903]: E1127 00:08:56.813623 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="gather" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813629 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="gather" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813967 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="copy" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.813992 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3a3f026-c4b0-4548-b126-2c1b5ef94597" containerName="registry-server" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.814033 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="646193d4-60c2-440c-a998-0ec89db5aaf3" containerName="gather" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.816195 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.823824 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tczvh"/"openshift-service-ca.crt" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.824164 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-tczvh"/"kube-root-ca.crt" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.834401 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tczvh/must-gather-855zj"] Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.902062 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:56 crc kubenswrapper[4903]: I1127 00:08:56.902542 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpwfk\" (UniqueName: \"kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.005568 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpwfk\" (UniqueName: \"kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.005988 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.006387 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.028160 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpwfk\" (UniqueName: \"kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk\") pod \"must-gather-855zj\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.137143 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:08:57 crc kubenswrapper[4903]: I1127 00:08:57.669239 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-tczvh/must-gather-855zj"] Nov 27 00:08:58 crc kubenswrapper[4903]: I1127 00:08:58.048324 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/must-gather-855zj" event={"ID":"46129129-7d0f-416e-bdc2-500691655897","Type":"ContainerStarted","Data":"271385abc3b0be5928bc849a6a1c35dd6c67b3c936696a574f666bfaad6b3401"} Nov 27 00:08:59 crc kubenswrapper[4903]: I1127 00:08:59.061863 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/must-gather-855zj" event={"ID":"46129129-7d0f-416e-bdc2-500691655897","Type":"ContainerStarted","Data":"4c9316f66d88b024ff84891899d7ceceac26831eb2c8ba8e1be8f508497ea4e0"} Nov 27 00:08:59 crc kubenswrapper[4903]: I1127 00:08:59.062153 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/must-gather-855zj" event={"ID":"46129129-7d0f-416e-bdc2-500691655897","Type":"ContainerStarted","Data":"e7300a453dc8b7dd11e5f03220802f58de7bc86cf5780ed2ae986989bb9b849c"} Nov 27 00:08:59 crc kubenswrapper[4903]: I1127 00:08:59.080291 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tczvh/must-gather-855zj" podStartSLOduration=3.080266421 podStartE2EDuration="3.080266421s" podCreationTimestamp="2025-11-27 00:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:08:59.075959525 +0000 UTC m=+6467.766194425" watchObservedRunningTime="2025-11-27 00:08:59.080266421 +0000 UTC m=+6467.770501331" Nov 27 00:09:01 crc kubenswrapper[4903]: I1127 00:09:01.856781 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tczvh/crc-debug-p6zrc"] Nov 27 00:09:01 crc kubenswrapper[4903]: I1127 00:09:01.858810 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:01 crc kubenswrapper[4903]: I1127 00:09:01.860930 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tczvh"/"default-dockercfg-ntvfq" Nov 27 00:09:01 crc kubenswrapper[4903]: I1127 00:09:01.981084 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:09:01 crc kubenswrapper[4903]: I1127 00:09:01.981142 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.043112 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb99j\" (UniqueName: \"kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.043364 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.156913 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.157103 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.157110 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb99j\" (UniqueName: \"kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.187379 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb99j\" (UniqueName: \"kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j\") pod \"crc-debug-p6zrc\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:02 crc kubenswrapper[4903]: I1127 00:09:02.478976 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:03 crc kubenswrapper[4903]: I1127 00:09:03.104210 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" event={"ID":"9b153332-9766-4aa1-b986-49d51f20c414","Type":"ContainerStarted","Data":"e245ae4d2d05b426261c470e0f4d42df6fea8e0d8484780b70d33cfc6e77cb77"} Nov 27 00:09:03 crc kubenswrapper[4903]: I1127 00:09:03.104582 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" event={"ID":"9b153332-9766-4aa1-b986-49d51f20c414","Type":"ContainerStarted","Data":"1d1b8af7110b90cda2650b92b34cffbd8f4cd08441af27641cc3fe4a84620bb8"} Nov 27 00:09:03 crc kubenswrapper[4903]: I1127 00:09:03.133523 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" podStartSLOduration=2.133503258 podStartE2EDuration="2.133503258s" podCreationTimestamp="2025-11-27 00:09:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:09:03.120548382 +0000 UTC m=+6471.810783292" watchObservedRunningTime="2025-11-27 00:09:03.133503258 +0000 UTC m=+6471.823738168" Nov 27 00:09:31 crc kubenswrapper[4903]: I1127 00:09:31.981198 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:09:31 crc kubenswrapper[4903]: I1127 00:09:31.981621 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:09:54 crc kubenswrapper[4903]: I1127 00:09:54.669632 4903 generic.go:334] "Generic (PLEG): container finished" podID="9b153332-9766-4aa1-b986-49d51f20c414" containerID="e245ae4d2d05b426261c470e0f4d42df6fea8e0d8484780b70d33cfc6e77cb77" exitCode=0 Nov 27 00:09:54 crc kubenswrapper[4903]: I1127 00:09:54.669735 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" event={"ID":"9b153332-9766-4aa1-b986-49d51f20c414","Type":"ContainerDied","Data":"e245ae4d2d05b426261c470e0f4d42df6fea8e0d8484780b70d33cfc6e77cb77"} Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.821011 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.871579 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-p6zrc"] Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.888888 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-p6zrc"] Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.892280 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb99j\" (UniqueName: \"kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j\") pod \"9b153332-9766-4aa1-b986-49d51f20c414\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.892440 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host\") pod \"9b153332-9766-4aa1-b986-49d51f20c414\" (UID: \"9b153332-9766-4aa1-b986-49d51f20c414\") " Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.892891 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host" (OuterVolumeSpecName: "host") pod "9b153332-9766-4aa1-b986-49d51f20c414" (UID: "9b153332-9766-4aa1-b986-49d51f20c414"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.893293 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b153332-9766-4aa1-b986-49d51f20c414-host\") on node \"crc\" DevicePath \"\"" Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.911679 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j" (OuterVolumeSpecName: "kube-api-access-hb99j") pod "9b153332-9766-4aa1-b986-49d51f20c414" (UID: "9b153332-9766-4aa1-b986-49d51f20c414"). InnerVolumeSpecName "kube-api-access-hb99j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:09:55 crc kubenswrapper[4903]: I1127 00:09:55.995778 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb99j\" (UniqueName: \"kubernetes.io/projected/9b153332-9766-4aa1-b986-49d51f20c414-kube-api-access-hb99j\") on node \"crc\" DevicePath \"\"" Nov 27 00:09:56 crc kubenswrapper[4903]: I1127 00:09:56.054502 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b153332-9766-4aa1-b986-49d51f20c414" path="/var/lib/kubelet/pods/9b153332-9766-4aa1-b986-49d51f20c414/volumes" Nov 27 00:09:56 crc kubenswrapper[4903]: I1127 00:09:56.699422 4903 scope.go:117] "RemoveContainer" containerID="e245ae4d2d05b426261c470e0f4d42df6fea8e0d8484780b70d33cfc6e77cb77" Nov 27 00:09:56 crc kubenswrapper[4903]: I1127 00:09:56.699487 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-p6zrc" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.109359 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-tczvh/crc-debug-znpn4"] Nov 27 00:09:57 crc kubenswrapper[4903]: E1127 00:09:57.110308 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b153332-9766-4aa1-b986-49d51f20c414" containerName="container-00" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.110327 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b153332-9766-4aa1-b986-49d51f20c414" containerName="container-00" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.110675 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b153332-9766-4aa1-b986-49d51f20c414" containerName="container-00" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.111743 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.114136 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tczvh"/"default-dockercfg-ntvfq" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.223353 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.223657 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8ghv\" (UniqueName: \"kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.327112 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.327257 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8ghv\" (UniqueName: \"kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.327583 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.357254 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8ghv\" (UniqueName: \"kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv\") pod \"crc-debug-znpn4\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 
00:09:57.432369 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:57 crc kubenswrapper[4903]: I1127 00:09:57.711586 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-znpn4" event={"ID":"880cb0a1-17ff-4636-83df-f27e2f7d1144","Type":"ContainerStarted","Data":"66896e243ffeb5abadefbb43679ec147c37716d2134c92b45cc387e411809ace"} Nov 27 00:09:58 crc kubenswrapper[4903]: I1127 00:09:58.725951 4903 generic.go:334] "Generic (PLEG): container finished" podID="880cb0a1-17ff-4636-83df-f27e2f7d1144" containerID="05c89e3b3e1feba9f68f1716bd8bff9c48530967306886d3600016a32b954c96" exitCode=0 Nov 27 00:09:58 crc kubenswrapper[4903]: I1127 00:09:58.726070 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-znpn4" event={"ID":"880cb0a1-17ff-4636-83df-f27e2f7d1144","Type":"ContainerDied","Data":"05c89e3b3e1feba9f68f1716bd8bff9c48530967306886d3600016a32b954c96"} Nov 27 00:09:59 crc kubenswrapper[4903]: I1127 00:09:59.876337 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:09:59 crc kubenswrapper[4903]: I1127 00:09:59.994567 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host\") pod \"880cb0a1-17ff-4636-83df-f27e2f7d1144\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " Nov 27 00:09:59 crc kubenswrapper[4903]: I1127 00:09:59.994903 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8ghv\" (UniqueName: \"kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv\") pod \"880cb0a1-17ff-4636-83df-f27e2f7d1144\" (UID: \"880cb0a1-17ff-4636-83df-f27e2f7d1144\") " Nov 27 00:09:59 crc kubenswrapper[4903]: I1127 00:09:59.995219 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host" (OuterVolumeSpecName: "host") pod "880cb0a1-17ff-4636-83df-f27e2f7d1144" (UID: "880cb0a1-17ff-4636-83df-f27e2f7d1144"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:09:59 crc kubenswrapper[4903]: I1127 00:09:59.995648 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/880cb0a1-17ff-4636-83df-f27e2f7d1144-host\") on node \"crc\" DevicePath \"\"" Nov 27 00:10:00 crc kubenswrapper[4903]: I1127 00:10:00.000182 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv" (OuterVolumeSpecName: "kube-api-access-w8ghv") pod "880cb0a1-17ff-4636-83df-f27e2f7d1144" (UID: "880cb0a1-17ff-4636-83df-f27e2f7d1144"). InnerVolumeSpecName "kube-api-access-w8ghv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:10:00 crc kubenswrapper[4903]: I1127 00:10:00.097839 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8ghv\" (UniqueName: \"kubernetes.io/projected/880cb0a1-17ff-4636-83df-f27e2f7d1144-kube-api-access-w8ghv\") on node \"crc\" DevicePath \"\"" Nov 27 00:10:00 crc kubenswrapper[4903]: I1127 00:10:00.787115 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-znpn4" event={"ID":"880cb0a1-17ff-4636-83df-f27e2f7d1144","Type":"ContainerDied","Data":"66896e243ffeb5abadefbb43679ec147c37716d2134c92b45cc387e411809ace"} Nov 27 00:10:00 crc kubenswrapper[4903]: I1127 00:10:00.787476 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66896e243ffeb5abadefbb43679ec147c37716d2134c92b45cc387e411809ace" Nov 27 00:10:00 crc kubenswrapper[4903]: I1127 00:10:00.787561 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-znpn4" Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.093646 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-znpn4"] Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.105077 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-znpn4"] Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.982446 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.982783 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.982846 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.983502 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 27 00:10:01 crc kubenswrapper[4903]: I1127 00:10:01.983572 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65" gracePeriod=600 Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.041747 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="880cb0a1-17ff-4636-83df-f27e2f7d1144" path="/var/lib/kubelet/pods/880cb0a1-17ff-4636-83df-f27e2f7d1144/volumes" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.343797 4903 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-must-gather-tczvh/crc-debug-9xww4"] Nov 27 00:10:02 crc kubenswrapper[4903]: E1127 00:10:02.344785 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="880cb0a1-17ff-4636-83df-f27e2f7d1144" containerName="container-00" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.344809 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="880cb0a1-17ff-4636-83df-f27e2f7d1144" containerName="container-00" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.345128 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="880cb0a1-17ff-4636-83df-f27e2f7d1144" containerName="container-00" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.346112 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.347832 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-tczvh"/"default-dockercfg-ntvfq" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.448060 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdzn7\" (UniqueName: \"kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.448593 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.552232 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdzn7\" (UniqueName: \"kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.552929 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.553188 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.573675 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdzn7\" (UniqueName: \"kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7\") pod \"crc-debug-9xww4\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.666960 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.814964 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-9xww4" event={"ID":"d6048f59-d5be-45f0-98d6-76a9d3cf2595","Type":"ContainerStarted","Data":"1a17e603cf777294a7a4d39937b361e0c9baedcbeef40d70eb5e265ad804e184"} Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.819317 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65" exitCode=0 Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.819359 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65"} Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.819433 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5"} Nov 27 00:10:02 crc kubenswrapper[4903]: I1127 00:10:02.819457 4903 scope.go:117] "RemoveContainer" containerID="02f515062b7ff2bda08e09cb8c72a3611586455ba29e6f79c4fb592e1f750455" Nov 27 00:10:03 crc kubenswrapper[4903]: I1127 00:10:03.838129 4903 generic.go:334] "Generic (PLEG): container finished" podID="d6048f59-d5be-45f0-98d6-76a9d3cf2595" containerID="f8c3d234f9f5bdc76184a85e30727272e43400687c7ffd694d29c1b09419135f" exitCode=0 Nov 27 00:10:03 crc kubenswrapper[4903]: I1127 00:10:03.838413 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/crc-debug-9xww4" event={"ID":"d6048f59-d5be-45f0-98d6-76a9d3cf2595","Type":"ContainerDied","Data":"f8c3d234f9f5bdc76184a85e30727272e43400687c7ffd694d29c1b09419135f"} Nov 27 00:10:03 crc kubenswrapper[4903]: I1127 00:10:03.888098 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-9xww4"] Nov 27 00:10:03 crc kubenswrapper[4903]: I1127 00:10:03.899610 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tczvh/crc-debug-9xww4"] Nov 27 00:10:04 crc kubenswrapper[4903]: I1127 00:10:04.984741 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.115297 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host\") pod \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.115509 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdzn7\" (UniqueName: \"kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7\") pod \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\" (UID: \"d6048f59-d5be-45f0-98d6-76a9d3cf2595\") " Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.115828 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host" (OuterVolumeSpecName: "host") pod "d6048f59-d5be-45f0-98d6-76a9d3cf2595" (UID: "d6048f59-d5be-45f0-98d6-76a9d3cf2595"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.116525 4903 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d6048f59-d5be-45f0-98d6-76a9d3cf2595-host\") on node \"crc\" DevicePath \"\"" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.122438 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7" (OuterVolumeSpecName: "kube-api-access-tdzn7") pod "d6048f59-d5be-45f0-98d6-76a9d3cf2595" (UID: "d6048f59-d5be-45f0-98d6-76a9d3cf2595"). InnerVolumeSpecName "kube-api-access-tdzn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.219197 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdzn7\" (UniqueName: \"kubernetes.io/projected/d6048f59-d5be-45f0-98d6-76a9d3cf2595-kube-api-access-tdzn7\") on node \"crc\" DevicePath \"\"" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.862076 4903 scope.go:117] "RemoveContainer" containerID="f8c3d234f9f5bdc76184a85e30727272e43400687c7ffd694d29c1b09419135f" Nov 27 00:10:05 crc kubenswrapper[4903]: I1127 00:10:05.862125 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-tczvh/crc-debug-9xww4" Nov 27 00:10:06 crc kubenswrapper[4903]: I1127 00:10:06.045607 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6048f59-d5be-45f0-98d6-76a9d3cf2595" path="/var/lib/kubelet/pods/d6048f59-d5be-45f0-98d6-76a9d3cf2595/volumes" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.554435 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-api/0.log" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.636405 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-evaluator/0.log" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.718399 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-listener/0.log" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.819360 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_e8d0ddee-85d8-40d5-9cfc-d279c65aa4be/aodh-notifier/0.log" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.868029 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86bcb477db-8xtr8_37c314f3-5577-423f-887f-7c551f339c3b/barbican-api/0.log" Nov 27 00:10:32 crc kubenswrapper[4903]: I1127 00:10:32.944425 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-86bcb477db-8xtr8_37c314f3-5577-423f-887f-7c551f339c3b/barbican-api-log/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.002508 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79464fcfdb-twqnx_8824059c-5e2d-4ce5-b224-fc144593d08d/barbican-keystone-listener/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.249528 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79464fcfdb-twqnx_8824059c-5e2d-4ce5-b224-fc144593d08d/barbican-keystone-listener-log/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.254682 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-58b8c46b65-kdwlj_40944cbe-7c1b-43b0-bed6-28f9490a0d5f/barbican-worker/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.311870 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-58b8c46b65-kdwlj_40944cbe-7c1b-43b0-bed6-28f9490a0d5f/barbican-worker-log/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.494053 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-2qsv2_e9c5ea47-6ef3-44d4-b710-d11a2367448e/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.635542 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/ceilometer-central-agent/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.682016 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/ceilometer-notification-agent/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.722108 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/sg-core/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.739786 4903 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c528520-edac-42d3-a81c-f5aca4d05266/proxy-httpd/0.log" Nov 27 00:10:33 crc kubenswrapper[4903]: I1127 00:10:33.930461 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_172525bd-6c7f-4e76-b7b4-47c937c33a14/cinder-api-log/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.023612 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_172525bd-6c7f-4e76-b7b4-47c937c33a14/cinder-api/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.050357 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-db-purge-29403361-ffwm8_bed02ce7-86ba-4f32-aa9d-5517cca15371/cinder-db-purge/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.249625 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_40127bc0-c09b-4c3f-af93-cdfcaee9d36e/cinder-scheduler/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.328959 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_40127bc0-c09b-4c3f-af93-cdfcaee9d36e/probe/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.412938 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-65pnw_59398ac1-b8ae-47b3-b00e-f9f245b4eb27/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.526453 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-dg882_e186f675-8a6e-4e8d-8531-247e10617355/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.627511 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/init/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.853940 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/init/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.864141 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-4fkw5_9af2401e-79f8-4a02-be46-995607766071/dnsmasq-dns/0.log" Nov 27 00:10:34 crc kubenswrapper[4903]: I1127 00:10:34.908259 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-k665c_8115e93a-72c0-4022-a687-6b58fb3c45ab/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:35 crc kubenswrapper[4903]: I1127 00:10:35.236420 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-db-purge-29403361-vv6q4_d8ce0598-1d46-4ddb-b383-4b66b3296d4b/glance-dbpurge/0.log" Nov 27 00:10:35 crc kubenswrapper[4903]: I1127 00:10:35.377929 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e9527a05-6356-4ee8-8e07-5557453ad8c2/glance-log/0.log" Nov 27 00:10:35 crc kubenswrapper[4903]: I1127 00:10:35.383022 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e9527a05-6356-4ee8-8e07-5557453ad8c2/glance-httpd/0.log" Nov 27 00:10:35 crc kubenswrapper[4903]: I1127 00:10:35.525963 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_d500fa23-7825-4dde-95b4-dce1b93b24cb/glance-httpd/0.log" Nov 27 00:10:35 crc kubenswrapper[4903]: I1127 00:10:35.604158 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d500fa23-7825-4dde-95b4-dce1b93b24cb/glance-log/0.log" Nov 27 00:10:36 crc kubenswrapper[4903]: I1127 00:10:36.666238 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-66bc977bcf-w4wg7_cc6fa80e-1db0-4944-9c07-04df732f4914/heat-engine/0.log" Nov 27 00:10:36 crc kubenswrapper[4903]: I1127 00:10:36.859010 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-wtsn6_ab43bf25-eae9-472d-80d9-0e91478c8302/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:36 crc kubenswrapper[4903]: I1127 00:10:36.936054 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-wh5kt_31e264f2-c649-43e0-af90-ca65e2cb84da/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.082094 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29403301-ljvz7_be70a483-c763-4980-a995-61d1a6f5573e/keystone-cron/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.264602 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-766bc64666-vhfgd_3abe2357-63af-453d-9e93-3d087275e569/heat-cfnapi/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.306833 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-867c798764-xfxnw_e0f6d1e3-0e99-495c-a8da-005cc8d05e25/heat-api/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.340920 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29403361-mfzw7_ce8e3d84-5904-40d3-99fd-0847d2f205f1/keystone-cron/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.532080 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0f92d7db-9155-4bdc-8285-29091382434c/kube-state-metrics/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.753244 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-8jt67_402f509f-516b-446b-a5e8-f42c6aa65ed7/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.844952 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-bg8bq_c177b1fa-8c5c-43f2-bb1d-c1695ccf0050/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:37 crc kubenswrapper[4903]: I1127 00:10:37.896724 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-fcdf5f968-7ppxk_b3f65d25-6e7d-4b8e-99e1-c75c39abb982/keystone-api/0.log" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.044798 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_a0677cff-9cf4-4eba-bb4b-4fea82d38f71/mysqld-exporter/0.log" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.558203 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qssnm"] Nov 27 00:10:38 crc kubenswrapper[4903]: E1127 00:10:38.559080 4903 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d6048f59-d5be-45f0-98d6-76a9d3cf2595" containerName="container-00" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.559100 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6048f59-d5be-45f0-98d6-76a9d3cf2595" containerName="container-00" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.559420 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6048f59-d5be-45f0-98d6-76a9d3cf2595" containerName="container-00" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.561598 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.570389 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qssnm"] Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.672643 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-catalog-content\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.672827 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-utilities\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.672900 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmg4c\" (UniqueName: \"kubernetes.io/projected/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-kube-api-access-qmg4c\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.746674 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-ht8qb_c8b7f7a3-07d3-46c8-a5e2-0b08c743d466/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.774514 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-catalog-content\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.774783 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-utilities\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.774899 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmg4c\" (UniqueName: \"kubernetes.io/projected/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-kube-api-access-qmg4c\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc 
kubenswrapper[4903]: I1127 00:10:38.775184 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-catalog-content\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.775551 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-utilities\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.784346 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5bbd968879-hmnnt_710e7305-de14-46ea-8cc9-1cbc9dcf0a44/neutron-api/0.log" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.809863 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5bbd968879-hmnnt_710e7305-de14-46ea-8cc9-1cbc9dcf0a44/neutron-httpd/0.log" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.839630 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmg4c\" (UniqueName: \"kubernetes.io/projected/aacb9593-5b91-4faf-9bc1-2021c35ca0e5-kube-api-access-qmg4c\") pod \"redhat-operators-qssnm\" (UID: \"aacb9593-5b91-4faf-9bc1-2021c35ca0e5\") " pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:38 crc kubenswrapper[4903]: I1127 00:10:38.885457 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:10:39 crc kubenswrapper[4903]: I1127 00:10:39.626835 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_47dfaf47-3f8b-4355-8c56-a0955f49d95f/nova-cell0-conductor-conductor/0.log" Nov 27 00:10:39 crc kubenswrapper[4903]: I1127 00:10:39.667612 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qssnm"] Nov 27 00:10:39 crc kubenswrapper[4903]: I1127 00:10:39.727494 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-db-purge-29403360-wlbzs_79b66965-3ee8-42b9-8526-c73cbd4ee362/nova-manage/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.049511 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d23a575d-55d9-4805-bfee-09f92b0b97ef/nova-api-log/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.304181 4903 generic.go:334] "Generic (PLEG): container finished" podID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerID="0a107ab52370cfdcecad45d90a3073f86ce1fd56f38d06904aeaeaaf61d891ac" exitCode=0 Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.304454 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qssnm" event={"ID":"aacb9593-5b91-4faf-9bc1-2021c35ca0e5","Type":"ContainerDied","Data":"0a107ab52370cfdcecad45d90a3073f86ce1fd56f38d06904aeaeaaf61d891ac"} Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.304536 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qssnm" event={"ID":"aacb9593-5b91-4faf-9bc1-2021c35ca0e5","Type":"ContainerStarted","Data":"dd05bc9239d29c96ee4feeb75651e169ade4af6a72ca5faaf2a2d17f28b59813"} Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.307001 
4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.367417 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_7d4be6e3-d909-4e4f-b5a0-3c949c02421a/nova-cell1-conductor-conductor/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.397510 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-db-purge-29403360-tg276_a4ddd645-4995-419b-a345-a9ef14f5b01d/nova-manage/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.729012 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d23a575d-55d9-4805-bfee-09f92b0b97ef/nova-api-api/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.806348 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_81026daf-ddcc-4599-8458-b8280d48c920/nova-cell1-novncproxy-novncproxy/0.log" Nov 27 00:10:40 crc kubenswrapper[4903]: I1127 00:10:40.895213 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r5l7j_be764009-e30d-4394-b38c-83996b86b9e1/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:41 crc kubenswrapper[4903]: I1127 00:10:41.130118 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ecafb017-7ef9-492e-95d5-d297ec3c9725/nova-metadata-log/0.log" Nov 27 00:10:41 crc kubenswrapper[4903]: I1127 00:10:41.463653 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_646d5a7a-f188-4dc2-99ac-24c16bcf59fc/nova-scheduler-scheduler/0.log" Nov 27 00:10:41 crc kubenswrapper[4903]: I1127 00:10:41.486528 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/mysql-bootstrap/0.log" Nov 27 00:10:41 crc kubenswrapper[4903]: I1127 00:10:41.729798 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/mysql-bootstrap/0.log" Nov 27 00:10:41 crc kubenswrapper[4903]: I1127 00:10:41.758084 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_aabcbcd0-4cc0-495d-b059-6b8722c47aa1/galera/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.061260 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/mysql-bootstrap/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.120167 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/mysql-bootstrap/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.174518 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b1969a76-48dc-4a53-8ee9-f9b5a5670e30/galera/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.377013 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_0057fb69-c2f2-4f5f-ad83-5fd16fcc99b0/openstackclient/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.842417 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-ksn6g_d95087b9-4f77-4f65-b7bd-b799e673de6f/openstack-network-exporter/0.log" Nov 27 00:10:42 crc kubenswrapper[4903]: I1127 00:10:42.855677 4903 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovn-controller-kzb8j_1aa29ea2-aaab-435e-9995-41a5f137be03/ovn-controller/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.139868 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server-init/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.376354 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server-init/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.390112 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovsdb-server/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.403588 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-mv2r4_bfc848b9-8183-4fb5-b8ce-d9542294079f/ovs-vswitchd/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.659212 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wcwtk_d791fd3c-48a9-44e4-85e2-9e0f088ecb6c/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.862460 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_16d5105b-5e4e-4806-a873-a79e1aaccc68/ovn-northd/0.log" Nov 27 00:10:43 crc kubenswrapper[4903]: I1127 00:10:43.871773 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_16d5105b-5e4e-4806-a873-a79e1aaccc68/openstack-network-exporter/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.025393 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ecafb017-7ef9-492e-95d5-d297ec3c9725/nova-metadata-metadata/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.067753 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e/openstack-network-exporter/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.128241 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ab9571fb-cb73-43ba-b0f3-fd1ef6b21a2e/ovsdbserver-nb/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.255492 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4286478b-1146-4f96-8819-753c3f6a6158/openstack-network-exporter/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.332669 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4286478b-1146-4f96-8819-753c3f6a6158/ovsdbserver-sb/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.823430 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/init-config-reloader/0.log" Nov 27 00:10:44 crc kubenswrapper[4903]: I1127 00:10:44.965207 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/init-config-reloader/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.006166 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-747d9754b8-8kqq9_74c08acb-478e-442a-b66d-5f29e75790f4/placement-api/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.051080 4903 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/config-reloader/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.055789 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-747d9754b8-8kqq9_74c08acb-478e-442a-b66d-5f29e75790f4/placement-log/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.198966 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/prometheus/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.217904 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_a1ef57dd-556d-40c3-8691-c7e55171a7a6/thanos-sidecar/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.300679 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/setup-container/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.637056 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/setup-container/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.637331 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/setup-container/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.725784 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_f32ba682-7919-4290-adff-40b16ea07fed/rabbitmq/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.910782 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/setup-container/0.log" Nov 27 00:10:45 crc kubenswrapper[4903]: I1127 00:10:45.993817 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-qtzb6_f442214b-4e84-4d9a-aa2c-9c3ae673ed4d/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:46 crc kubenswrapper[4903]: I1127 00:10:46.044169 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_5c06e745-2d71-48e5-9cf2-e361471b9b74/rabbitmq/0.log" Nov 27 00:10:46 crc kubenswrapper[4903]: I1127 00:10:46.276483 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-s8hcp_f4801ee8-4d4e-4459-8289-60e5db96a3b9/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:46 crc kubenswrapper[4903]: I1127 00:10:46.305410 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-24pb2_154ea937-525e-406f-bef0-ffd2c360d7e1/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:46 crc kubenswrapper[4903]: I1127 00:10:46.557614 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-sgr64_f8164f4b-1f48-4f38-810b-3a3b636c48ed/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:46 crc kubenswrapper[4903]: I1127 00:10:46.691145 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-wrjtk_078bfb36-1f57-4173-b01a-cc7a6e3862dc/ssh-known-hosts-edpm-deployment/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.060716 4903 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76df48858c-p4q7x_73028630-97ff-425e-9ac8-1b30f1c834c4/proxy-server/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.198618 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-wp2ng_be81f20b-b9ca-44bf-8aad-2cd7a10e44cc/swift-ring-rebalance/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.276757 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-auditor/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.356366 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-76df48858c-p4q7x_73028630-97ff-425e-9ac8-1b30f1c834c4/proxy-httpd/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.534435 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-reaper/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.566268 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-auditor/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.700093 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-replicator/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.761485 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/account-server/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.806441 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-server/0.log" Nov 27 00:10:47 crc kubenswrapper[4903]: I1127 00:10:47.891745 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-replicator/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.021577 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/container-updater/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.140737 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-auditor/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.162892 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-expirer/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.256635 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-replicator/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.289305 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-server/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.437861 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/rsync/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.526649 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/swift-recon-cron/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.602949 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_1f34b822-e8fa-4f6d-b793-01d0e80ccb06/object-updater/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.969684 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-4jgbk_78703fde-a3cc-4241-940e-f92a638f8549/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:48 crc kubenswrapper[4903]: I1127 00:10:48.971029 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-68wq5_5582dbe8-0a07-4c5f-9054-5e0bc32c2819/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:49 crc kubenswrapper[4903]: I1127 00:10:49.195567 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_27638dee-c020-4daa-a79a-5acf5e013899/test-operator-logs-container/0.log" Nov 27 00:10:49 crc kubenswrapper[4903]: I1127 00:10:49.857876 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-5nwk2_5b5ab3d3-0223-4b0f-ab25-785af487d360/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 27 00:10:50 crc kubenswrapper[4903]: I1127 00:10:50.469341 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_47c0a41f-61f3-4e6c-8367-a25c5a75d02b/tempest-tests-tempest-tests-runner/0.log" Nov 27 00:10:58 crc kubenswrapper[4903]: I1127 00:10:58.668164 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6fd08b11-1328-47a3-82a3-286d70df4394/memcached/0.log" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.714876 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"] Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.718452 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.729671 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"] Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.822174 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnw6b\" (UniqueName: \"kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.822298 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.822408 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.924606 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnw6b\" (UniqueName: \"kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.924745 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.924880 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.925279 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.925730 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:01 crc kubenswrapper[4903]: I1127 00:11:01.970856 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jnw6b\" (UniqueName: \"kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b\") pod \"redhat-marketplace-4zrf7\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") " pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:02 crc kubenswrapper[4903]: I1127 00:11:02.038463 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:17 crc kubenswrapper[4903]: E1127 00:11:17.968488 4903 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 27 00:11:18 crc kubenswrapper[4903]: E1127 00:11:18.049244 4903 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qmg4c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-qssnm_openshift-marketplace(aacb9593-5b91-4faf-9bc1-2021c35ca0e5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 27 00:11:18 crc kubenswrapper[4903]: E1127 00:11:18.050798 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" Nov 27 00:11:18 crc kubenswrapper[4903]: I1127 00:11:18.827374 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"] Nov 27 00:11:18 crc kubenswrapper[4903]: I1127 00:11:18.938871 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" 
event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerStarted","Data":"01c8cd506d5ee1649aaf465d457706a86319e4b30c161d1a2e1afdb01f249778"} Nov 27 00:11:18 crc kubenswrapper[4903]: E1127 00:11:18.941078 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.423919 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.514244 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.518424 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.620270 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.785401 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/util/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.875992 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/pull/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.891896 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_20cc3ed3055def167235da9ca58b08c0013e23634143f2b0113cc7d2a62k6xw_34b7b696-e29f-43e0-8186-9ca0219ab924/extract/0.log" Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.955645 4903 generic.go:334] "Generic (PLEG): container finished" podID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerID="b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f" exitCode=0 Nov 27 00:11:19 crc kubenswrapper[4903]: I1127 00:11:19.956009 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerDied","Data":"b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f"} Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.100929 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/kube-rbac-proxy/0.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.159188 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/manager/2.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.296048 4903 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-6hzbx_3e621847-5f60-491a-8e5c-f2fb10df1726/manager/3.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.314864 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/kube-rbac-proxy/0.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.437748 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/manager/2.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.498225 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/kube-rbac-proxy/0.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.560062 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-n7krq_d9a3465f-cd49-4af9-a908-58aec0273dbe/manager/3.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.640296 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/manager/2.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.690212 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-rtztw_63feada5-3911-469e-a0b1-539b7aa2948d/manager/1.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.737497 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/kube-rbac-proxy/0.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.919159 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/manager/2.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.922454 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/kube-rbac-proxy/0.log" Nov 27 00:11:20 crc kubenswrapper[4903]: I1127 00:11:20.952239 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-shqxg_710215b7-5e67-47d8-833f-b8db638cac56/manager/3.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.121092 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/manager/2.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.208501 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/kube-rbac-proxy/0.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.328139 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/manager/3.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.426654 4903 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-v4b66_34b48ba8-04a0-463d-9e31-b7c13127ce9c/manager/2.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.433714 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-x59hr_e0c12217-0537-436e-b0d9-5e5049888268/manager/3.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.680017 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/kube-rbac-proxy/0.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.764956 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/manager/1.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.834003 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-tdlsw_b34e8bed-559a-49d6-b870-c375f36be49f/manager/2.log" Nov 27 00:11:21 crc kubenswrapper[4903]: I1127 00:11:21.876574 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/kube-rbac-proxy/0.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.011842 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/manager/3.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.021024 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-bm7r7_ced64189-a8c9-4e13-956b-f69139a9602b/manager/2.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.072814 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/kube-rbac-proxy/0.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.219351 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/manager/3.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.239753 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-kxg8s_e3d89c00-9723-43a3-a1d2-866787257900/manager/2.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.292479 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/kube-rbac-proxy/0.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.446274 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/manager/3.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.496585 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-vj562_edfb7faf-e9af-4ee8-85cd-a11af5812946/manager/2.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.509336 4903 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/kube-rbac-proxy/0.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.747215 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/kube-rbac-proxy/0.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.897178 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/manager/1.log" Nov 27 00:11:22 crc kubenswrapper[4903]: I1127 00:11:22.989963 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerStarted","Data":"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"} Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.083535 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/manager/2.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.084953 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-pzwmk_32ccd880-8dfa-46d1-b262-5d10422527ec/manager/2.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.093128 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-t5gqj_9c3a16ab-252a-4a01-aaab-b273d3d55c0a/manager/3.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.251007 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/kube-rbac-proxy/0.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.309498 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/manager/2.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.313934 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5kmlf_fcacd7dc-2b08-46d7-98c2-09cf6b6d690b/manager/3.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.369705 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/kube-rbac-proxy/0.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.467819 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/manager/3.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.482260 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-nz8x4_6b930423-80e6-4e2c-825f-7deceec090f5/manager/2.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.590455 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/kube-rbac-proxy/0.log" Nov 27 
00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.608599 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/manager/1.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.686098 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bf5v54_d4e9967e-dcf0-42c1-94fc-fea289ed54c2/manager/0.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.835373 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5467d974c6-lpj77_9239ccfa-cbaa-44b2-a70f-94a281d885f6/manager/1.log" Nov 27 00:11:23 crc kubenswrapper[4903]: I1127 00:11:23.990402 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5bd96487c4-8k4kq_651c7100-bdd0-41e2-8a7f-eaab13dfd391/operator/1.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.060409 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-zljmf_a348b7af-eb1c-4c45-8611-9a37a4ee9ac7/registry-server/0.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.146042 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5bd96487c4-8k4kq_651c7100-bdd0-41e2-8a7f-eaab13dfd391/operator/0.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.297221 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/kube-rbac-proxy/0.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.359311 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/manager/3.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.370631 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-jn49q_0c7b8e09-c502-425e-ac59-b2befd1132fa/manager/2.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.489059 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/kube-rbac-proxy/0.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.575937 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/manager/3.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.599490 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vjt6h_83927c87-ccd7-4b29-97b1-8d03ce0d1b1e/manager/2.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.666475 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5467d974c6-lpj77_9239ccfa-cbaa-44b2-a70f-94a281d885f6/manager/2.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.788908 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fzd8p_8248a160-f606-4eaa-9bc1-0e7fcc1ab852/operator/2.log" Nov 
27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.835216 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-fzd8p_8248a160-f606-4eaa-9bc1-0e7fcc1ab852/operator/1.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.882358 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/kube-rbac-proxy/0.log" Nov 27 00:11:24 crc kubenswrapper[4903]: I1127 00:11:24.985056 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/manager/2.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.018357 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-2h7mb_736b757c-8584-4b59-81d6-ffdd8bbac62c/manager/3.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.085002 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/kube-rbac-proxy/0.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.179167 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/manager/1.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.180947 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6986c4df8b-bkqnw_3f2ebc07-fbfc-4bd6-9622-63b820e47247/manager/2.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.308559 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/kube-rbac-proxy/0.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.343974 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/manager/1.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.400809 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gw5wx_1a890e26-66fb-47d6-85dc-ae6b9045e4c6/manager/0.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.487104 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/kube-rbac-proxy/0.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.545880 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/manager/2.log" Nov 27 00:11:25 crc kubenswrapper[4903]: I1127 00:11:25.553728 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bwfhp_f8815d8e-4b34-47b3-98fa-8370205381e0/manager/1.log" Nov 27 00:11:27 crc kubenswrapper[4903]: I1127 00:11:27.042198 4903 generic.go:334] "Generic (PLEG): container finished" podID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerID="45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f" exitCode=0 Nov 27 
00:11:27 crc kubenswrapper[4903]: I1127 00:11:27.043215 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerDied","Data":"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"} Nov 27 00:11:31 crc kubenswrapper[4903]: I1127 00:11:31.094001 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerStarted","Data":"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"} Nov 27 00:11:31 crc kubenswrapper[4903]: I1127 00:11:31.112421 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4zrf7" podStartSLOduration=19.983370183 podStartE2EDuration="30.112402119s" podCreationTimestamp="2025-11-27 00:11:01 +0000 UTC" firstStartedPulling="2025-11-27 00:11:19.958751342 +0000 UTC m=+6608.648986252" lastFinishedPulling="2025-11-27 00:11:30.087783278 +0000 UTC m=+6618.778018188" observedRunningTime="2025-11-27 00:11:31.110725105 +0000 UTC m=+6619.800960015" watchObservedRunningTime="2025-11-27 00:11:31.112402119 +0000 UTC m=+6619.802637039" Nov 27 00:11:32 crc kubenswrapper[4903]: I1127 00:11:32.068886 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:32 crc kubenswrapper[4903]: I1127 00:11:32.069129 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:32 crc kubenswrapper[4903]: I1127 00:11:32.146301 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:33 crc kubenswrapper[4903]: I1127 00:11:33.121886 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qssnm" event={"ID":"aacb9593-5b91-4faf-9bc1-2021c35ca0e5","Type":"ContainerStarted","Data":"cdea33ca84930da54ffafa1287729b82beb6b20366a4c3652d782195ab915505"} Nov 27 00:11:42 crc kubenswrapper[4903]: I1127 00:11:42.100327 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4zrf7" Nov 27 00:11:42 crc kubenswrapper[4903]: I1127 00:11:42.161837 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"] Nov 27 00:11:42 crc kubenswrapper[4903]: I1127 00:11:42.225070 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4zrf7" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="registry-server" containerID="cri-o://dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c" gracePeriod=2 Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.067176 4903 util.go:48] "No ready sandbox for pod can be found. 
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.067176 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zrf7"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.114218 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities\") pod \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") "
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.114437 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnw6b\" (UniqueName: \"kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b\") pod \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") "
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.114516 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content\") pod \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\" (UID: \"6f3daec5-d161-457e-99cd-1b1f43a0ccf8\") "
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.116663 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities" (OuterVolumeSpecName: "utilities") pod "6f3daec5-d161-457e-99cd-1b1f43a0ccf8" (UID: "6f3daec5-d161-457e-99cd-1b1f43a0ccf8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.122163 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b" (OuterVolumeSpecName: "kube-api-access-jnw6b") pod "6f3daec5-d161-457e-99cd-1b1f43a0ccf8" (UID: "6f3daec5-d161-457e-99cd-1b1f43a0ccf8"). InnerVolumeSpecName "kube-api-access-jnw6b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.138258 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f3daec5-d161-457e-99cd-1b1f43a0ccf8" (UID: "6f3daec5-d161-457e-99cd-1b1f43a0ccf8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.217950 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnw6b\" (UniqueName: \"kubernetes.io/projected/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-kube-api-access-jnw6b\") on node \"crc\" DevicePath \"\""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.217987 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.217999 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3daec5-d161-457e-99cd-1b1f43a0ccf8-utilities\") on node \"crc\" DevicePath \"\""
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.237177 4903 generic.go:334] "Generic (PLEG): container finished" podID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerID="dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c" exitCode=0
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.237219 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerDied","Data":"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"}
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.237243 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zrf7"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.237261 4903 scope.go:117] "RemoveContainer" containerID="dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.237250 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zrf7" event={"ID":"6f3daec5-d161-457e-99cd-1b1f43a0ccf8","Type":"ContainerDied","Data":"01c8cd506d5ee1649aaf465d457706a86319e4b30c161d1a2e1afdb01f249778"}
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.270642 4903 scope.go:117] "RemoveContainer" containerID="45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.290433 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"]
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.292407 4903 scope.go:117] "RemoveContainer" containerID="b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.305103 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zrf7"]
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.356324 4903 scope.go:117] "RemoveContainer" containerID="dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"
Nov 27 00:11:43 crc kubenswrapper[4903]: E1127 00:11:43.356727 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c\": container with ID starting with dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c not found: ID does not exist" containerID="dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.356772 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c"} err="failed to get container status \"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c\": rpc error: code = NotFound desc = could not find container \"dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c\": container with ID starting with dc774139d3e0ebb46d0eac55e1f04efbcf0b0ab865c688aa7fdab13d65ed5a4c not found: ID does not exist"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.356797 4903 scope.go:117] "RemoveContainer" containerID="45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"
Nov 27 00:11:43 crc kubenswrapper[4903]: E1127 00:11:43.357175 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f\": container with ID starting with 45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f not found: ID does not exist" containerID="45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.357207 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f"} err="failed to get container status \"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f\": rpc error: code = NotFound desc = could not find container \"45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f\": container with ID starting with 45f6489728f141d8090be829fd895cf1632057af336e165e93c6d543e602920f not found: ID does not exist"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.357228 4903 scope.go:117] "RemoveContainer" containerID="b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f"
Nov 27 00:11:43 crc kubenswrapper[4903]: E1127 00:11:43.357881 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f\": container with ID starting with b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f not found: ID does not exist" containerID="b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f"
Nov 27 00:11:43 crc kubenswrapper[4903]: I1127 00:11:43.357908 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f"} err="failed to get container status \"b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f\": rpc error: code = NotFound desc = could not find container \"b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f\": container with ID starting with b596ec6b1d2cbd89ad6de9883533818acd5873ae5aa2d94c7d2b799f507a515f not found: ID does not exist"
Nov 27 00:11:44 crc kubenswrapper[4903]: I1127 00:11:44.045461 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" path="/var/lib/kubelet/pods/6f3daec5-d161-457e-99cd-1b1f43a0ccf8/volumes"
Nov 27 00:11:47 crc kubenswrapper[4903]: I1127 00:11:47.193441 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-zb6l2_72c47664-999f-45b2-b047-184bdc7d8c58/control-plane-machine-set-operator/0.log"
Nov 27 00:11:47 crc kubenswrapper[4903]: I1127 00:11:47.293659 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x242l_27420cfc-cc8f-4482-9206-706ab7bf9430/kube-rbac-proxy/0.log"
Nov 27 00:11:47 crc kubenswrapper[4903]: I1127 00:11:47.431336 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x242l_27420cfc-cc8f-4482-9206-706ab7bf9430/machine-api-operator/0.log"
Nov 27 00:11:50 crc kubenswrapper[4903]: I1127 00:11:50.319953 4903 generic.go:334] "Generic (PLEG): container finished" podID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerID="cdea33ca84930da54ffafa1287729b82beb6b20366a4c3652d782195ab915505" exitCode=0
Nov 27 00:11:50 crc kubenswrapper[4903]: I1127 00:11:50.320125 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qssnm" event={"ID":"aacb9593-5b91-4faf-9bc1-2021c35ca0e5","Type":"ContainerDied","Data":"cdea33ca84930da54ffafa1287729b82beb6b20366a4c3652d782195ab915505"}
Nov 27 00:11:54 crc kubenswrapper[4903]: I1127 00:11:54.375281 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qssnm" event={"ID":"aacb9593-5b91-4faf-9bc1-2021c35ca0e5","Type":"ContainerStarted","Data":"57281f30c3c480b8d2371c23406e1876d53c662c31590d3b0b7df5d7b4bc6e97"}
Nov 27 00:11:54 crc kubenswrapper[4903]: I1127 00:11:54.405913 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qssnm" podStartSLOduration=3.335768992 podStartE2EDuration="1m16.405891928s" podCreationTimestamp="2025-11-27 00:10:38 +0000 UTC" firstStartedPulling="2025-11-27 00:10:40.306717083 +0000 UTC m=+6568.996951993" lastFinishedPulling="2025-11-27 00:11:53.376840019 +0000 UTC m=+6642.067074929" observedRunningTime="2025-11-27 00:11:54.393318263 +0000 UTC m=+6643.083553183" watchObservedRunningTime="2025-11-27 00:11:54.405891928 +0000 UTC m=+6643.096126838"
Nov 27 00:11:58 crc kubenswrapper[4903]: I1127 00:11:58.886472 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qssnm"
Nov 27 00:11:58 crc kubenswrapper[4903]: I1127 00:11:58.886930 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qssnm"
Nov 27 00:11:59 crc kubenswrapper[4903]: I1127 00:11:59.939116 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:11:59 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:11:59 crc kubenswrapper[4903]: >
Nov 27 00:12:01 crc kubenswrapper[4903]: I1127 00:12:01.590571 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-qdk8j_de11c064-60b1-4f96-a316-bc903f061766/cert-manager-controller/0.log"
Nov 27 00:12:01 crc kubenswrapper[4903]: I1127 00:12:01.664428 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-qdk8j_de11c064-60b1-4f96-a316-bc903f061766/cert-manager-controller/1.log"
Nov 27 00:12:02 crc kubenswrapper[4903]: I1127 00:12:02.042952 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dfvzf_580a58c8-ce17-4d85-991a-e51d3eb639b3/cert-manager-cainjector/1.log"
Nov 27 00:12:02 crc kubenswrapper[4903]: I1127 00:12:02.097716 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-dfvzf_580a58c8-ce17-4d85-991a-e51d3eb639b3/cert-manager-cainjector/0.log"
Nov 27 00:12:02 crc kubenswrapper[4903]: I1127 00:12:02.242292 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-zz98c_da38aa1b-878d-476e-b742-7329a813bf99/cert-manager-webhook/0.log"
Nov 27 00:12:09 crc kubenswrapper[4903]: I1127 00:12:09.952897 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:12:09 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:12:09 crc kubenswrapper[4903]: >
Nov 27 00:12:16 crc kubenswrapper[4903]: I1127 00:12:16.993553 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-hpzbl_872167df-4435-42c4-9503-8bfca809574f/nmstate-console-plugin/0.log"
Nov 27 00:12:17 crc kubenswrapper[4903]: I1127 00:12:17.179283 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-v5kzk_097195ec-5a3f-4d57-b864-264165398ff6/nmstate-handler/0.log"
Nov 27 00:12:17 crc kubenswrapper[4903]: I1127 00:12:17.212752 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-ctqlb_500a4a0f-2474-482b-9f47-7304d9bd35e9/kube-rbac-proxy/0.log"
Nov 27 00:12:17 crc kubenswrapper[4903]: I1127 00:12:17.362018 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-ctqlb_500a4a0f-2474-482b-9f47-7304d9bd35e9/nmstate-metrics/0.log"
Nov 27 00:12:17 crc kubenswrapper[4903]: I1127 00:12:17.448098 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-btnn5_f0eef496-9727-4ee7-9c31-c2afcb9303c6/nmstate-operator/0.log"
Nov 27 00:12:17 crc kubenswrapper[4903]: I1127 00:12:17.592032 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-9rhdl_8ebd57a0-155f-481c-9d2e-11c69e14b6fc/nmstate-webhook/0.log"
Nov 27 00:12:19 crc kubenswrapper[4903]: I1127 00:12:19.938495 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:12:19 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:12:19 crc kubenswrapper[4903]: >
Nov 27 00:12:29 crc kubenswrapper[4903]: I1127 00:12:29.938107 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:12:29 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:12:29 crc kubenswrapper[4903]: >
Nov 27 00:12:31 crc kubenswrapper[4903]: I1127 00:12:31.235194 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/kube-rbac-proxy/0.log"
Nov 27 00:12:31 crc kubenswrapper[4903]: I1127 00:12:31.337719 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/1.log"
Nov 27 00:12:31 crc kubenswrapper[4903]: I1127 00:12:31.475555 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/0.log"
Nov 27 00:12:31 crc kubenswrapper[4903]: I1127 00:12:31.981079 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 27 00:12:31 crc kubenswrapper[4903]: I1127 00:12:31.981421 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 27 00:12:39 crc kubenswrapper[4903]: I1127 00:12:39.944118 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qssnm" podUID="aacb9593-5b91-4faf-9bc1-2021c35ca0e5" containerName="registry-server" probeResult="failure" output=<
Nov 27 00:12:39 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s
Nov 27 00:12:39 crc kubenswrapper[4903]: >
Nov 27 00:12:47 crc kubenswrapper[4903]: I1127 00:12:47.566041 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-xslxr_a56d21dd-5874-4bc1-9fe3-5fc6a5b4a354/cluster-logging-operator/0.log"
Nov 27 00:12:47 crc kubenswrapper[4903]: I1127 00:12:47.780310 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-xj985_49456ff3-4275-428d-84cc-25664a331100/collector/0.log"
Nov 27 00:12:47 crc kubenswrapper[4903]: I1127 00:12:47.796291 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_0cb383d8-296b-4298-8f2f-28edb1f1278f/loki-compactor/0.log"
Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.013646 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-g4fbs_16a3e6c0-118c-4827-b39b-d9a59d959fec/gateway/0.log"
Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.014716 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-htm5b_546d4145-a63b-4664-86d0-9ce432670a7b/loki-distributor/0.log"
Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.073411 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-g4fbs_16a3e6c0-118c-4827-b39b-d9a59d959fec/opa/0.log"
Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.221537 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-tvgns_64e0c0a9-13e7-4f0b-989d-8f217958cd92/gateway/0.log"
Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.274032 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-6b8dc7bf86-tvgns_64e0c0a9-13e7-4f0b-989d-8f217958cd92/opa/0.log"
path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_d4ca2376-fa84-4a6c-b47b-3661bacfd578/loki-index-gateway/0.log" Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.568511 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_23d0313e-2bdb-4054-8951-2e29fd19f371/loki-ingester/0.log" Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.675108 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-k45nx_138798d6-77b9-4e20-970b-d83e0378e667/loki-querier/0.log" Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.794080 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-52tt7_9fb3c717-adf5-483c-9d16-6d47d489a5e1/loki-query-frontend/0.log" Nov 27 00:12:48 crc kubenswrapper[4903]: I1127 00:12:48.946679 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.012585 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qssnm" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.128938 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qssnm"] Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.199029 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"] Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.199255 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-khkfx" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="registry-server" containerID="cri-o://9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c" gracePeriod=2 Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.836619 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khkfx" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.876670 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities\") pod \"ca8619ad-8673-4b83-907d-e274c4cd11ac\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.876817 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content\") pod \"ca8619ad-8673-4b83-907d-e274c4cd11ac\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.876934 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc2qx\" (UniqueName: \"kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx\") pod \"ca8619ad-8673-4b83-907d-e274c4cd11ac\" (UID: \"ca8619ad-8673-4b83-907d-e274c4cd11ac\") " Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.878067 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities" (OuterVolumeSpecName: "utilities") pod "ca8619ad-8673-4b83-907d-e274c4cd11ac" (UID: "ca8619ad-8673-4b83-907d-e274c4cd11ac"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.902060 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx" (OuterVolumeSpecName: "kube-api-access-hc2qx") pod "ca8619ad-8673-4b83-907d-e274c4cd11ac" (UID: "ca8619ad-8673-4b83-907d-e274c4cd11ac"). InnerVolumeSpecName "kube-api-access-hc2qx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.979638 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-utilities\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:49 crc kubenswrapper[4903]: I1127 00:12:49.979687 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc2qx\" (UniqueName: \"kubernetes.io/projected/ca8619ad-8673-4b83-907d-e274c4cd11ac-kube-api-access-hc2qx\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.018349 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ca8619ad-8673-4b83-907d-e274c4cd11ac" (UID: "ca8619ad-8673-4b83-907d-e274c4cd11ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063092 4903 generic.go:334] "Generic (PLEG): container finished" podID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerID="9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c" exitCode=0 Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063302 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerDied","Data":"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"} Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063342 4903 util.go:48] "No ready sandbox for pod can be found. 
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063342 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-khkfx"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063366 4903 scope.go:117] "RemoveContainer" containerID="9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.063352 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-khkfx" event={"ID":"ca8619ad-8673-4b83-907d-e274c4cd11ac","Type":"ContainerDied","Data":"4bb5e7936357f86fc57337a502610c13f2ef06b561044dd59e1880f7ad5f47a9"}
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.081799 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca8619ad-8673-4b83-907d-e274c4cd11ac-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.096050 4903 scope.go:117] "RemoveContainer" containerID="1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.107009 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"]
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.137454 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-khkfx"]
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.147558 4903 scope.go:117] "RemoveContainer" containerID="f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.190038 4903 scope.go:117] "RemoveContainer" containerID="9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"
Nov 27 00:12:50 crc kubenswrapper[4903]: E1127 00:12:50.191314 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c\": container with ID starting with 9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c not found: ID does not exist" containerID="9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.191427 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c"} err="failed to get container status \"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c\": rpc error: code = NotFound desc = could not find container \"9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c\": container with ID starting with 9b152c881b5dc89169aed3296983b9776809c28fd2d1541938f241e5ad76c52c not found: ID does not exist"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.191509 4903 scope.go:117] "RemoveContainer" containerID="1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"
Nov 27 00:12:50 crc kubenswrapper[4903]: E1127 00:12:50.192281 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910\": container with ID starting with 1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910 not found: ID does not exist" containerID="1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.192377 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910"} err="failed to get container status \"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910\": rpc error: code = NotFound desc = could not find container \"1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910\": container with ID starting with 1cc890d6fde168409a6b34171524e971237ca625b47f784dbc33b1302738f910 not found: ID does not exist"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.192470 4903 scope.go:117] "RemoveContainer" containerID="f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"
Nov 27 00:12:50 crc kubenswrapper[4903]: E1127 00:12:50.193032 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf\": container with ID starting with f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf not found: ID does not exist" containerID="f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"
Nov 27 00:12:50 crc kubenswrapper[4903]: I1127 00:12:50.193118 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf"} err="failed to get container status \"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf\": rpc error: code = NotFound desc = could not find container \"f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf\": container with ID starting with f2540d6cc212f3c08e2b414e4fe700b233f77f38287b7ce47a54d4c94272c7bf not found: ID does not exist"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.039726 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" path="/var/lib/kubelet/pods/ca8619ad-8673-4b83-907d-e274c4cd11ac/volumes"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.275632 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr"]
Nov 27 00:12:52 crc kubenswrapper[4903]: E1127 00:12:52.276368 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="extract-content"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276385 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="extract-content"
Nov 27 00:12:52 crc kubenswrapper[4903]: E1127 00:12:52.276397 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="extract-utilities"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276405 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="extract-utilities"
Nov 27 00:12:52 crc kubenswrapper[4903]: E1127 00:12:52.276434 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="extract-utilities"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276440 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="extract-utilities"
containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276465 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: E1127 00:12:52.276488 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="extract-content" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276494 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="extract-content" Nov 27 00:12:52 crc kubenswrapper[4903]: E1127 00:12:52.276513 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276521 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276741 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca8619ad-8673-4b83-907d-e274c4cd11ac" containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.276759 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f3daec5-d161-457e-99cd-1b1f43a0ccf8" containerName="registry-server" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.282982 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.286231 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.315330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr"] Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.334284 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.334358 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg455\" (UniqueName: \"kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.334384 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 
00:12:52.436363 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.436597 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.436642 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg455\" (UniqueName: \"kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.436862 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.437095 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.473308 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg455\" (UniqueName: \"kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.618554 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.658295 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"] Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.661168 4903 util.go:30] "No sandbox for pod can be found. 
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.661168 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.687796 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"]
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.744415 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwmnr\" (UniqueName: \"kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.744636 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.744675 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.848745 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.848791 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.848979 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwmnr\" (UniqueName: \"kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.849287 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.849538 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:52 crc kubenswrapper[4903]: I1127 00:12:52.869333 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwmnr\" (UniqueName: \"kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:53 crc kubenswrapper[4903]: I1127 00:12:53.090104 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"
Nov 27 00:12:53 crc kubenswrapper[4903]: I1127 00:12:53.167259 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr"]
Nov 27 00:12:54 crc kubenswrapper[4903]: I1127 00:12:54.135156 4903 generic.go:334] "Generic (PLEG): container finished" podID="20425297-6df8-4b47-bade-0dd5e5539827" containerID="e418116a6950fd55175743538317102d339f38be8f2eeabb2f3be2f2bb679812" exitCode=0
Nov 27 00:12:54 crc kubenswrapper[4903]: I1127 00:12:54.135197 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" event={"ID":"20425297-6df8-4b47-bade-0dd5e5539827","Type":"ContainerDied","Data":"e418116a6950fd55175743538317102d339f38be8f2eeabb2f3be2f2bb679812"}
Nov 27 00:12:54 crc kubenswrapper[4903]: I1127 00:12:54.135843 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" event={"ID":"20425297-6df8-4b47-bade-0dd5e5539827","Type":"ContainerStarted","Data":"786b4b0cfe2a8629d2b88776b5792e619b056569b8c2f2cf38a8fee4515afa98"}
Nov 27 00:12:54 crc kubenswrapper[4903]: I1127 00:12:54.268319 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr"]
Nov 27 00:12:54 crc kubenswrapper[4903]: W1127 00:12:54.272491 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcad43621_97ed_4d84_85dc_482bafe37257.slice/crio-bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4 WatchSource:0}: Error finding container bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4: Status 404 returned error can't find the container with id bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4
Nov 27 00:12:55 crc kubenswrapper[4903]: I1127 00:12:55.148103 4903 generic.go:334] "Generic (PLEG): container finished" podID="cad43621-97ed-4d84-85dc-482bafe37257" containerID="3e34d7b5ebc3cc14625848766faf53a717a8631160ce23b7c717dbc424c4ad77" exitCode=0
pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" event={"ID":"cad43621-97ed-4d84-85dc-482bafe37257","Type":"ContainerDied","Data":"3e34d7b5ebc3cc14625848766faf53a717a8631160ce23b7c717dbc424c4ad77"} Nov 27 00:12:55 crc kubenswrapper[4903]: I1127 00:12:55.148657 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" event={"ID":"cad43621-97ed-4d84-85dc-482bafe37257","Type":"ContainerStarted","Data":"bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4"} Nov 27 00:12:56 crc kubenswrapper[4903]: I1127 00:12:56.162191 4903 generic.go:334] "Generic (PLEG): container finished" podID="20425297-6df8-4b47-bade-0dd5e5539827" containerID="6ebfc58175cb59e4ea21b5136b76c0cca5fde893c22fc004b0b06c4d7a3d4f06" exitCode=0 Nov 27 00:12:56 crc kubenswrapper[4903]: I1127 00:12:56.162246 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" event={"ID":"20425297-6df8-4b47-bade-0dd5e5539827","Type":"ContainerDied","Data":"6ebfc58175cb59e4ea21b5136b76c0cca5fde893c22fc004b0b06c4d7a3d4f06"} Nov 27 00:12:57 crc kubenswrapper[4903]: I1127 00:12:57.175285 4903 generic.go:334] "Generic (PLEG): container finished" podID="cad43621-97ed-4d84-85dc-482bafe37257" containerID="0d144f6efe39133870a5cb2a8176921b235a54af3900f15b82764e5803dc8356" exitCode=0 Nov 27 00:12:57 crc kubenswrapper[4903]: I1127 00:12:57.175402 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" event={"ID":"cad43621-97ed-4d84-85dc-482bafe37257","Type":"ContainerDied","Data":"0d144f6efe39133870a5cb2a8176921b235a54af3900f15b82764e5803dc8356"} Nov 27 00:12:57 crc kubenswrapper[4903]: I1127 00:12:57.180485 4903 generic.go:334] "Generic (PLEG): container finished" podID="20425297-6df8-4b47-bade-0dd5e5539827" containerID="5d636529606466a94e75e739d63b906528fcffc8b2857d1593e22f4af59e9c6a" exitCode=0 Nov 27 00:12:57 crc kubenswrapper[4903]: I1127 00:12:57.180531 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" event={"ID":"20425297-6df8-4b47-bade-0dd5e5539827","Type":"ContainerDied","Data":"5d636529606466a94e75e739d63b906528fcffc8b2857d1593e22f4af59e9c6a"} Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.194156 4903 generic.go:334] "Generic (PLEG): container finished" podID="cad43621-97ed-4d84-85dc-482bafe37257" containerID="8978f31736f4aabdd16f68e7b7ca9efdd6d8c8ec7f9bb1c3908af428d08a8cc8" exitCode=0 Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.194239 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" event={"ID":"cad43621-97ed-4d84-85dc-482bafe37257","Type":"ContainerDied","Data":"8978f31736f4aabdd16f68e7b7ca9efdd6d8c8ec7f9bb1c3908af428d08a8cc8"} Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.650605 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.809312 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg455\" (UniqueName: \"kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455\") pod \"20425297-6df8-4b47-bade-0dd5e5539827\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.809398 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util\") pod \"20425297-6df8-4b47-bade-0dd5e5539827\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.809505 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle\") pod \"20425297-6df8-4b47-bade-0dd5e5539827\" (UID: \"20425297-6df8-4b47-bade-0dd5e5539827\") " Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.810057 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle" (OuterVolumeSpecName: "bundle") pod "20425297-6df8-4b47-bade-0dd5e5539827" (UID: "20425297-6df8-4b47-bade-0dd5e5539827"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.815938 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455" (OuterVolumeSpecName: "kube-api-access-fg455") pod "20425297-6df8-4b47-bade-0dd5e5539827" (UID: "20425297-6df8-4b47-bade-0dd5e5539827"). InnerVolumeSpecName "kube-api-access-fg455". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.912101 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.912419 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg455\" (UniqueName: \"kubernetes.io/projected/20425297-6df8-4b47-bade-0dd5e5539827-kube-api-access-fg455\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:58 crc kubenswrapper[4903]: I1127 00:12:58.972017 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util" (OuterVolumeSpecName: "util") pod "20425297-6df8-4b47-bade-0dd5e5539827" (UID: "20425297-6df8-4b47-bade-0dd5e5539827"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.014220 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/20425297-6df8-4b47-bade-0dd5e5539827-util\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.208110 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" event={"ID":"20425297-6df8-4b47-bade-0dd5e5539827","Type":"ContainerDied","Data":"786b4b0cfe2a8629d2b88776b5792e619b056569b8c2f2cf38a8fee4515afa98"} Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.208152 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="786b4b0cfe2a8629d2b88776b5792e619b056569b8c2f2cf38a8fee4515afa98" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.208287 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.626392 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.728003 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util\") pod \"cad43621-97ed-4d84-85dc-482bafe37257\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.728191 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle\") pod \"cad43621-97ed-4d84-85dc-482bafe37257\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.728229 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwmnr\" (UniqueName: \"kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr\") pod \"cad43621-97ed-4d84-85dc-482bafe37257\" (UID: \"cad43621-97ed-4d84-85dc-482bafe37257\") " Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.729369 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle" (OuterVolumeSpecName: "bundle") pod "cad43621-97ed-4d84-85dc-482bafe37257" (UID: "cad43621-97ed-4d84-85dc-482bafe37257"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.737841 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr" (OuterVolumeSpecName: "kube-api-access-xwmnr") pod "cad43621-97ed-4d84-85dc-482bafe37257" (UID: "cad43621-97ed-4d84-85dc-482bafe37257"). InnerVolumeSpecName "kube-api-access-xwmnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.832411 4903 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-bundle\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.832464 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwmnr\" (UniqueName: \"kubernetes.io/projected/cad43621-97ed-4d84-85dc-482bafe37257-kube-api-access-xwmnr\") on node \"crc\" DevicePath \"\"" Nov 27 00:12:59 crc kubenswrapper[4903]: I1127 00:12:59.952940 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util" (OuterVolumeSpecName: "util") pod "cad43621-97ed-4d84-85dc-482bafe37257" (UID: "cad43621-97ed-4d84-85dc-482bafe37257"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:13:00 crc kubenswrapper[4903]: I1127 00:13:00.036302 4903 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cad43621-97ed-4d84-85dc-482bafe37257-util\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:00 crc kubenswrapper[4903]: I1127 00:13:00.221928 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" event={"ID":"cad43621-97ed-4d84-85dc-482bafe37257","Type":"ContainerDied","Data":"bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4"} Nov 27 00:13:00 crc kubenswrapper[4903]: I1127 00:13:00.221965 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbf09c7e67f59f766ec6addc40493f4ceca24863b9ce1747e8d21cf88fc800b4" Nov 27 00:13:00 crc kubenswrapper[4903]: I1127 00:13:00.222027 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr" Nov 27 00:13:01 crc kubenswrapper[4903]: I1127 00:13:01.981035 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:13:01 crc kubenswrapper[4903]: I1127 00:13:01.981361 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.288644 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"] Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289495 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="pull" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289508 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="pull" Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289540 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="extract" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289546 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="extract" Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289589 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="pull" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289595 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="pull" Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289609 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="util" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289615 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="20425297-6df8-4b47-bade-0dd5e5539827" containerName="util" Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289628 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="extract" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289633 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="extract" Nov 27 00:13:05 crc kubenswrapper[4903]: E1127 00:13:05.289644 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="util" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289650 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="util" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.289954 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="cad43621-97ed-4d84-85dc-482bafe37257" containerName="extract" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.290794 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.310193 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"]
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.450954 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccxrm\" (UniqueName: \"kubernetes.io/projected/aafbc4ae-891a-463f-8c50-02a76fdb8b75-kube-api-access-ccxrm\") pod \"nmstate-operator-5b5b58f5c8-4tfsr\" (UID: \"aafbc4ae-891a-463f-8c50-02a76fdb8b75\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.553981 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccxrm\" (UniqueName: \"kubernetes.io/projected/aafbc4ae-891a-463f-8c50-02a76fdb8b75-kube-api-access-ccxrm\") pod \"nmstate-operator-5b5b58f5c8-4tfsr\" (UID: \"aafbc4ae-891a-463f-8c50-02a76fdb8b75\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.596683 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccxrm\" (UniqueName: \"kubernetes.io/projected/aafbc4ae-891a-463f-8c50-02a76fdb8b75-kube-api-access-ccxrm\") pod \"nmstate-operator-5b5b58f5c8-4tfsr\" (UID: \"aafbc4ae-891a-463f-8c50-02a76fdb8b75\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"
Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.620778 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"
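For kube-api-access-ccxrm the reconciler walks its usual progression: VerifyControllerAttachedVolume, then operationExecutor.MountVolume started, then MountVolume.SetUp succeeded. A deliberately simplified Go sketch of that three-phase progression (phase names mirror the log; the real reconciler is far more involved):

package main

import "fmt"

type phase int

const (
	attached phase = iota // VerifyControllerAttachedVolume passed
	mounting              // operationExecutor.MountVolume started
	mounted               // MountVolume.SetUp succeeded
)

// reconcile advances a volume one phase per pass, as the log entries do.
func reconcile(vol string, p phase) phase {
	switch p {
	case attached:
		fmt.Printf("MountVolume started for volume %q\n", vol)
		return mounting
	case mounting:
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", vol)
		return mounted
	}
	return p
}

func main() {
	p := attached
	for p != mounted {
		p = reconcile("kube-api-access-ccxrm", p)
	}
}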
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr" Nov 27 00:13:05 crc kubenswrapper[4903]: I1127 00:13:05.892464 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hw6vb_a9e12d32-ef72-446c-b317-8d00a90a651b/kube-rbac-proxy/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.032544 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hw6vb_a9e12d32-ef72-446c-b317-8d00a90a651b/controller/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.113801 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr"] Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.291053 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr" event={"ID":"aafbc4ae-891a-463f-8c50-02a76fdb8b75","Type":"ContainerStarted","Data":"0cdecef2a4f282dd0d28d0c2865e27120ffeaac347112f8aaa38297462dfc25a"} Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.294330 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.525188 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.529924 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.588242 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.612015 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.877462 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.918590 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.931150 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:13:06 crc kubenswrapper[4903]: I1127 00:13:06.974363 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.192863 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-reloader/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.245867 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-frr-files/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.250296 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/controller/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.257817 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/cp-metrics/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.504149 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/frr-metrics/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.581316 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/kube-rbac-proxy-frr/0.log" Nov 27 00:13:07 crc kubenswrapper[4903]: I1127 00:13:07.656303 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/kube-rbac-proxy/0.log" Nov 27 00:13:08 crc kubenswrapper[4903]: I1127 00:13:08.040783 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/reloader/0.log" Nov 27 00:13:08 crc kubenswrapper[4903]: I1127 00:13:08.127733 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-s9t6m_5317f83c-9fcf-4df1-9823-bb92767545a9/frr-k8s-webhook-server/0.log" Nov 27 00:13:08 crc kubenswrapper[4903]: I1127 00:13:08.532133 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57594f7c4c-gdzqb_b5900302-4880-4732-a477-8ed6cf3bfec3/manager/2.log" Nov 27 00:13:08 crc kubenswrapper[4903]: I1127 00:13:08.553417 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57594f7c4c-gdzqb_b5900302-4880-4732-a477-8ed6cf3bfec3/manager/3.log" Nov 27 00:13:09 crc kubenswrapper[4903]: I1127 00:13:09.215673 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-d76ff59f5-8bdc9_1c731f8b-9333-4076-b193-54255a31e938/webhook-server/0.log" Nov 27 00:13:09 crc kubenswrapper[4903]: I1127 00:13:09.355150 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qxrcx_2406eb0a-073a-4339-98f2-baa11ceacaa4/frr/0.log" Nov 27 00:13:09 crc kubenswrapper[4903]: I1127 00:13:09.453947 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f2g89_61e82f3d-2aca-46e7-bd0f-12c8b492c14e/kube-rbac-proxy/0.log" Nov 27 00:13:10 crc kubenswrapper[4903]: I1127 00:13:10.494892 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f2g89_61e82f3d-2aca-46e7-bd0f-12c8b492c14e/speaker/0.log" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.372049 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.372626 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="nmstate-metrics" containerID="cri-o://0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d" gracePeriod=30 Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.372655 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" 
podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="kube-rbac-proxy" containerID="cri-o://174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d" gracePeriod=30 Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.392577 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.392786 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" podUID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" containerName="nmstate-webhook" containerID="cri-o://8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358" gracePeriod=30 Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.409566 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr" event={"ID":"aafbc4ae-891a-463f-8c50-02a76fdb8b75","Type":"ContainerStarted","Data":"56d73f551ffb445a813932b13bfac7533d7f6d3aab2096dce56cda3cbf6b6741"} Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.420354 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-handler-v5kzk"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.420553 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-handler-v5kzk" podUID="097195ec-5a3f-4d57-b864-264165398ff6" containerName="nmstate-handler" containerID="cri-o://bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70" gracePeriod=30 Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.455501 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-4tfsr" podStartSLOduration=1.949616538 podStartE2EDuration="6.455483983s" podCreationTimestamp="2025-11-27 00:13:05 +0000 UTC" firstStartedPulling="2025-11-27 00:13:06.120671012 +0000 UTC m=+6714.810905922" lastFinishedPulling="2025-11-27 00:13:10.626538457 +0000 UTC m=+6719.316773367" observedRunningTime="2025-11-27 00:13:11.444711375 +0000 UTC m=+6720.134946285" watchObservedRunningTime="2025-11-27 00:13:11.455483983 +0000 UTC m=+6720.145718893" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.525975 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.526532 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" podUID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" containerName="nmstate-operator" containerID="cri-o://fd1af25c5bae5d52241e330c0ae766ae2639da3bfa1be5e19900efc2a932d7d7" gracePeriod=30 Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.577789 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.579615 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.596569 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb"] Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.737498 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.737650 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n788c\" (UniqueName: \"kubernetes.io/projected/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-kube-api-access-n788c\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.737717 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.840333 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n788c\" (UniqueName: \"kubernetes.io/projected/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-kube-api-access-n788c\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.840406 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.840669 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.842433 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.864368 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-plugin-serving-cert\") pod 
\"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:11 crc kubenswrapper[4903]: I1127 00:13:11.870321 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n788c\" (UniqueName: \"kubernetes.io/projected/6e7b354b-3cfa-4d69-8d23-b5ada914c3a4-kube-api-access-n788c\") pod \"nmstate-console-plugin-7fbb5f6569-w4kwb\" (UID: \"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.050398 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.080565 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.237347 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-9g9bn"] Nov 27 00:13:12 crc kubenswrapper[4903]: E1127 00:13:12.238123 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="097195ec-5a3f-4d57-b864-264165398ff6" containerName="nmstate-handler" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.238142 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="097195ec-5a3f-4d57-b864-264165398ff6" containerName="nmstate-handler" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.244464 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="097195ec-5a3f-4d57-b864-264165398ff6" containerName="nmstate-handler" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.255172 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.260837 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock\") pod \"097195ec-5a3f-4d57-b864-264165398ff6\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.260936 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket\") pod \"097195ec-5a3f-4d57-b864-264165398ff6\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.261036 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9jsq\" (UniqueName: \"kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq\") pod \"097195ec-5a3f-4d57-b864-264165398ff6\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.261049 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock" (OuterVolumeSpecName: "nmstate-lock") pod "097195ec-5a3f-4d57-b864-264165398ff6" (UID: "097195ec-5a3f-4d57-b864-264165398ff6"). InnerVolumeSpecName "nmstate-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.261090 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket" (OuterVolumeSpecName: "ovs-socket") pod "097195ec-5a3f-4d57-b864-264165398ff6" (UID: "097195ec-5a3f-4d57-b864-264165398ff6"). InnerVolumeSpecName "ovs-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.261180 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket\") pod \"097195ec-5a3f-4d57-b864-264165398ff6\" (UID: \"097195ec-5a3f-4d57-b864-264165398ff6\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.262062 4903 reconciler_common.go:293] "Volume detached for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-nmstate-lock\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.262082 4903 reconciler_common.go:293] "Volume detached for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-ovs-socket\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.262127 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket" (OuterVolumeSpecName: "dbus-socket") pod "097195ec-5a3f-4d57-b864-264165398ff6" (UID: "097195ec-5a3f-4d57-b864-264165398ff6"). InnerVolumeSpecName "dbus-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.272945 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.283745 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq" (OuterVolumeSpecName: "kube-api-access-n9jsq") pod "097195ec-5a3f-4d57-b864-264165398ff6" (UID: "097195ec-5a3f-4d57-b864-264165398ff6"). InnerVolumeSpecName "kube-api-access-n9jsq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.363502 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") pod \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.364133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7rcg\" (UniqueName: \"kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg\") pod \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\" (UID: \"8ebd57a0-155f-481c-9d2e-11c69e14b6fc\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.366330 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-nmstate-lock\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.367084 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qllnn\" (UniqueName: \"kubernetes.io/projected/da368d4d-3a8a-428e-9090-6fc0a7609bcd-kube-api-access-qllnn\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.367172 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-dbus-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.367247 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-ovs-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.367422 4903 reconciler_common.go:293] "Volume detached for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/097195ec-5a3f-4d57-b864-264165398ff6-dbus-socket\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.367459 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9jsq\" (UniqueName: \"kubernetes.io/projected/097195ec-5a3f-4d57-b864-264165398ff6-kube-api-access-n9jsq\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.379264 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg" (OuterVolumeSpecName: "kube-api-access-l7rcg") pod "8ebd57a0-155f-481c-9d2e-11c69e14b6fc" (UID: "8ebd57a0-155f-481c-9d2e-11c69e14b6fc"). InnerVolumeSpecName "kube-api-access-l7rcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.379353 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair" (OuterVolumeSpecName: "tls-key-pair") pod "8ebd57a0-155f-481c-9d2e-11c69e14b6fc" (UID: "8ebd57a0-155f-481c-9d2e-11c69e14b6fc"). InnerVolumeSpecName "tls-key-pair". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.428111 4903 generic.go:334] "Generic (PLEG): container finished" podID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" containerID="fd1af25c5bae5d52241e330c0ae766ae2639da3bfa1be5e19900efc2a932d7d7" exitCode=0 Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.428182 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" event={"ID":"f0eef496-9727-4ee7-9c31-c2afcb9303c6","Type":"ContainerDied","Data":"fd1af25c5bae5d52241e330c0ae766ae2639da3bfa1be5e19900efc2a932d7d7"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.434657 4903 generic.go:334] "Generic (PLEG): container finished" podID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" containerID="8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358" exitCode=0 Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.434808 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.434831 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" event={"ID":"8ebd57a0-155f-481c-9d2e-11c69e14b6fc","Type":"ContainerDied","Data":"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.434891 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl" event={"ID":"8ebd57a0-155f-481c-9d2e-11c69e14b6fc","Type":"ContainerDied","Data":"fc7f99c075d7a73d11f2569401320bb890c34f079bdeacaf302625280477252c"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.434917 4903 scope.go:117] "RemoveContainer" containerID="8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.461462 4903 generic.go:334] "Generic (PLEG): container finished" podID="097195ec-5a3f-4d57-b864-264165398ff6" containerID="bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70" exitCode=0 Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.461543 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-v5kzk" event={"ID":"097195ec-5a3f-4d57-b864-264165398ff6","Type":"ContainerDied","Data":"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.461567 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-v5kzk" event={"ID":"097195ec-5a3f-4d57-b864-264165398ff6","Type":"ContainerDied","Data":"1c367868ac962e8b251196a3ead09e84ad156747dba785ed829e0ab847e85bbf"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.461627 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-v5kzk" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.482770 4903 generic.go:334] "Generic (PLEG): container finished" podID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerID="0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d" exitCode=0 Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.482864 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerDied","Data":"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d"} Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.484980 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-nmstate-lock\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.485633 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qllnn\" (UniqueName: \"kubernetes.io/projected/da368d4d-3a8a-428e-9090-6fc0a7609bcd-kube-api-access-qllnn\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.485798 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-dbus-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.485844 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-ovs-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.485807 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-nmstate-lock\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.496534 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-dbus-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.496660 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/da368d4d-3a8a-428e-9090-6fc0a7609bcd-ovs-socket\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.497293 4903 reconciler_common.go:293] "Volume detached for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-tls-key-pair\") on node \"crc\" DevicePath \"\"" Nov 27 
00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.497329 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7rcg\" (UniqueName: \"kubernetes.io/projected/8ebd57a0-155f-481c-9d2e-11c69e14b6fc-kube-api-access-l7rcg\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.520286 4903 scope.go:117] "RemoveContainer" containerID="8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.522209 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7"] Nov 27 00:13:12 crc kubenswrapper[4903]: E1127 00:13:12.522864 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" containerName="nmstate-webhook" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.522881 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" containerName="nmstate-webhook" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.523208 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" containerName="nmstate-webhook" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.524373 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: E1127 00:13:12.531588 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358\": container with ID starting with 8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358 not found: ID does not exist" containerID="8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.531642 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358"} err="failed to get container status \"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358\": rpc error: code = NotFound desc = could not find container \"8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358\": container with ID starting with 8d6466c263682ba3bb35dc38b949705667f52ffcab0194829e8e76a4c5ae8358 not found: ID does not exist" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.531681 4903 scope.go:117] "RemoveContainer" containerID="bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.532078 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.543163 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qllnn\" (UniqueName: \"kubernetes.io/projected/da368d4d-3a8a-428e-9090-6fc0a7609bcd-kube-api-access-qllnn\") pod \"nmstate-handler-9g9bn\" (UID: \"da368d4d-3a8a-428e-9090-6fc0a7609bcd\") " pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.546493 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.568008 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
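The E-level "ContainerStatus from runtime service failed ... NotFound" entries here are benign: RemoveContainer raced with a container that was already gone, and deleting something already absent is treated as done. A small Go sketch of that idempotent-cleanup pattern (errNotFound and removeContainer are stand-ins, not the CRI API):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("container not found") // stand-in for the runtime's NotFound

func removeContainer(id string) error { return errNotFound } // already gone

// deleteContainer treats "not found" as success: the desired state
// (container absent) already holds, so there is nothing left to do.
func deleteContainer(id string) error {
	if err := removeContainer(id); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil
}

func main() {
	fmt.Println(deleteContainer("deadbeef")) // <nil>
}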
pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-9rhdl"] Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.596350 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7"] Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.597837 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.619107 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.620755 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-handler-v5kzk"] Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.640894 4903 scope.go:117] "RemoveContainer" containerID="bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70" Nov 27 00:13:12 crc kubenswrapper[4903]: E1127 00:13:12.649972 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70\": container with ID starting with bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70 not found: ID does not exist" containerID="bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.650155 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70"} err="failed to get container status \"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70\": rpc error: code = NotFound desc = could not find container \"bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70\": container with ID starting with bd2ed0461ad7a12beff26d3ca50f72f00d8808fad0db576de787d16344ca6d70 not found: ID does not exist" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.674432 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-nmstate/nmstate-handler-v5kzk"] Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.701816 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dff4245e-e0aa-4425-a4b2-45235c893470-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.702055 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfvp6\" (UniqueName: \"kubernetes.io/projected/dff4245e-e0aa-4425-a4b2-45235c893470-kube-api-access-tfvp6\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.808516 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zplnf\" (UniqueName: \"kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf\") pod \"f0eef496-9727-4ee7-9c31-c2afcb9303c6\" (UID: \"f0eef496-9727-4ee7-9c31-c2afcb9303c6\") " Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.809592 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dff4245e-e0aa-4425-a4b2-45235c893470-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.809638 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfvp6\" (UniqueName: \"kubernetes.io/projected/dff4245e-e0aa-4425-a4b2-45235c893470-kube-api-access-tfvp6\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.816335 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf" (OuterVolumeSpecName: "kube-api-access-zplnf") pod "f0eef496-9727-4ee7-9c31-c2afcb9303c6" (UID: "f0eef496-9727-4ee7-9c31-c2afcb9303c6"). InnerVolumeSpecName "kube-api-access-zplnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.833531 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zplnf\" (UniqueName: \"kubernetes.io/projected/f0eef496-9727-4ee7-9c31-c2afcb9303c6-kube-api-access-zplnf\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.856737 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/dff4245e-e0aa-4425-a4b2-45235c893470-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.863271 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfvp6\" (UniqueName: \"kubernetes.io/projected/dff4245e-e0aa-4425-a4b2-45235c893470-kube-api-access-tfvp6\") pod \"nmstate-webhook-5f6d4c5ccb-445k7\" (UID: \"dff4245e-e0aa-4425-a4b2-45235c893470\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:12 crc kubenswrapper[4903]: I1127 00:13:12.912253 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.137119 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.403327 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.513286 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c"] Nov 27 00:13:13 crc kubenswrapper[4903]: E1127 00:13:13.513809 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="nmstate-metrics" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.513821 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="nmstate-metrics" Nov 27 00:13:13 crc kubenswrapper[4903]: E1127 00:13:13.513872 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" containerName="nmstate-operator" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.513878 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" containerName="nmstate-operator" Nov 27 00:13:13 crc kubenswrapper[4903]: E1127 00:13:13.513892 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="kube-rbac-proxy" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.513899 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="kube-rbac-proxy" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.514139 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" containerName="nmstate-operator" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.514167 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="kube-rbac-proxy" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.514186 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerName="nmstate-metrics" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.515479 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.517228 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9g9bn" event={"ID":"da368d4d-3a8a-428e-9090-6fc0a7609bcd","Type":"ContainerStarted","Data":"c03c07c7bcaeae1d86f71d43b18691631621ca35df4143f665359af01664ef15"} Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.537901 4903 generic.go:334] "Generic (PLEG): container finished" podID="500a4a0f-2474-482b-9f47-7304d9bd35e9" containerID="174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d" exitCode=0 Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.538031 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerDied","Data":"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d"} Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.538059 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" event={"ID":"500a4a0f-2474-482b-9f47-7304d9bd35e9","Type":"ContainerDied","Data":"1fe52479eb44490553a277291b7fb7d8545eefaaf173de5d58d173e551720db2"} Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.538075 4903 scope.go:117] "RemoveContainer" containerID="174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.538224 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.542019 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.548645 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.548645 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-btnn5" event={"ID":"f0eef496-9727-4ee7-9c31-c2afcb9303c6","Type":"ContainerDied","Data":"b5e964ef175c729336898674e21f35bbba4e972bb274657ebb64b55e378bd68c"} Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.561580 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" event={"ID":"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4","Type":"ContainerStarted","Data":"c083fb93eddc242c89a4a1836a9ee9c1c05c77177e40bcac77183a7f04e2d367"} Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.582720 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-774s6\" (UniqueName: \"kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6\") pod \"500a4a0f-2474-482b-9f47-7304d9bd35e9\" (UID: \"500a4a0f-2474-482b-9f47-7304d9bd35e9\") " Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.600830 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdd4b\" (UniqueName: \"kubernetes.io/projected/869e4577-d4a9-4f47-9010-6bea93d0e841-kube-api-access-xdd4b\") pod \"nmstate-metrics-7f946cbc9-zhc4c\" (UID: \"869e4577-d4a9-4f47-9010-6bea93d0e841\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.601070 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6" (OuterVolumeSpecName: "kube-api-access-774s6") pod "500a4a0f-2474-482b-9f47-7304d9bd35e9" (UID: "500a4a0f-2474-482b-9f47-7304d9bd35e9"). InnerVolumeSpecName "kube-api-access-774s6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.604903 4903 scope.go:117] "RemoveContainer" containerID="0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.606739 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.618938 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-btnn5"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.637370 4903 scope.go:117] "RemoveContainer" containerID="174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d" Nov 27 00:13:13 crc kubenswrapper[4903]: E1127 00:13:13.637875 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d\": container with ID starting with 174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d not found: ID does not exist" containerID="174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.637948 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d"} err="failed to get container status \"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d\": rpc error: code = NotFound desc = could not find container \"174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d\": container with ID starting with 174c9695ee6fee0891835591881029f03ec0a1286a2bdd848db77d6d1a91cb8d not found: ID does not exist" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.637983 4903 scope.go:117] "RemoveContainer" containerID="0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d" Nov 27 00:13:13 crc kubenswrapper[4903]: E1127 00:13:13.638546 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d\": container with ID starting with 0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d not found: ID does not exist" containerID="0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.638571 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d"} err="failed to get container status \"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d\": rpc error: code = NotFound desc = could not find container \"0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d\": container with ID starting with 0871b51c162da8a73c48fd305c6cf3f3eecdc5480c72e728c5297466fcdb012d not found: ID does not exist" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.638608 4903 scope.go:117] "RemoveContainer" containerID="fd1af25c5bae5d52241e330c0ae766ae2639da3bfa1be5e19900efc2a932d7d7" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.680766 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.703093 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-xdd4b\" (UniqueName: \"kubernetes.io/projected/869e4577-d4a9-4f47-9010-6bea93d0e841-kube-api-access-xdd4b\") pod \"nmstate-metrics-7f946cbc9-zhc4c\" (UID: \"869e4577-d4a9-4f47-9010-6bea93d0e841\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.703331 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-774s6\" (UniqueName: \"kubernetes.io/projected/500a4a0f-2474-482b-9f47-7304d9bd35e9-kube-api-access-774s6\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.728382 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdd4b\" (UniqueName: \"kubernetes.io/projected/869e4577-d4a9-4f47-9010-6bea93d0e841-kube-api-access-xdd4b\") pod \"nmstate-metrics-7f946cbc9-zhc4c\" (UID: \"869e4577-d4a9-4f47-9010-6bea93d0e841\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.861317 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.872650 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 27 00:13:13 crc kubenswrapper[4903]: I1127 00:13:13.911787 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-ctqlb"] Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.053487 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="097195ec-5a3f-4d57-b864-264165398ff6" path="/var/lib/kubelet/pods/097195ec-5a3f-4d57-b864-264165398ff6/volumes" Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.055791 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="500a4a0f-2474-482b-9f47-7304d9bd35e9" path="/var/lib/kubelet/pods/500a4a0f-2474-482b-9f47-7304d9bd35e9/volumes" Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.058385 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ebd57a0-155f-481c-9d2e-11c69e14b6fc" path="/var/lib/kubelet/pods/8ebd57a0-155f-481c-9d2e-11c69e14b6fc/volumes" Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.060224 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0eef496-9727-4ee7-9c31-c2afcb9303c6" path="/var/lib/kubelet/pods/f0eef496-9727-4ee7-9c31-c2afcb9303c6/volumes" Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.444987 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c"] Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.577800 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" event={"ID":"869e4577-d4a9-4f47-9010-6bea93d0e841","Type":"ContainerStarted","Data":"430f3b5cab15be79d3bad8956e8dd7c8533bec1c574a68e9a2815395d58b5d01"} Nov 27 00:13:14 crc kubenswrapper[4903]: I1127 00:13:14.579658 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" event={"ID":"dff4245e-e0aa-4425-a4b2-45235c893470","Type":"ContainerStarted","Data":"bc4eb28daa2feb97f3f12715751b17948a6762fccd1c5f6eebe13790e758fb7e"} Nov 27 00:13:16 crc kubenswrapper[4903]: I1127 00:13:16.867444 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:13:16 
crc kubenswrapper[4903]: I1127 00:13:16.869464 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:16 crc kubenswrapper[4903]: I1127 00:13:16.913894 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.013511 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2xjh\" (UniqueName: \"kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.013889 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.014034 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.115366 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2xjh\" (UniqueName: \"kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.115462 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.115538 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.121122 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 
crc kubenswrapper[4903]: I1127 00:13:17.126434 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.134432 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2xjh\" (UniqueName: \"kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh\") pod \"metallb-operator-controller-manager-55f4fb8967-nxn9g\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.207643 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.217165 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv"] Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.218831 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.270342 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv"] Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.320517 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5smp7\" (UniqueName: \"kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.320825 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.320980 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.425901 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.425980 4903 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.426097 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5smp7\" (UniqueName: \"kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.430463 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.431591 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.444665 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5smp7\" (UniqueName: \"kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7\") pod \"metallb-operator-webhook-server-6476c88b58-5prcv\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:17 crc kubenswrapper[4903]: I1127 00:13:17.560414 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.551130 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.639827 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv"] Nov 27 00:13:18 crc kubenswrapper[4903]: W1127 00:13:18.641297 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ebc84bc_9ed7_40ce_a691_cd6c151debc2.slice/crio-a13bbf342b7976029e26ee1096ed2e0fcc20f6dbf33a54e074d05a871b9a259f WatchSource:0}: Error finding container a13bbf342b7976029e26ee1096ed2e0fcc20f6dbf33a54e074d05a871b9a259f: Status 404 returned error can't find the container with id a13bbf342b7976029e26ee1096ed2e0fcc20f6dbf33a54e074d05a871b9a259f Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.725240 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" event={"ID":"869e4577-d4a9-4f47-9010-6bea93d0e841","Type":"ContainerStarted","Data":"77d78dbbbe27c7e24ef5e2cbf9630a8762cb8e817298e35509a39648fed4e36f"} Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.729034 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" event={"ID":"dff4245e-e0aa-4425-a4b2-45235c893470","Type":"ContainerStarted","Data":"363074afa2960c719508008e5e557adc1d2568808c063be456479f31cf2fe87a"} Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.729679 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.737111 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" event={"ID":"d4705259-0dd2-4374-b6be-3ac6e57ae8f6","Type":"ContainerStarted","Data":"922bf28b4f542a7b3a2ffc2c832ef17eacfae85c1b7b6a6aeb5e8b7f9eb1127f"} Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.740138 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" event={"ID":"6e7b354b-3cfa-4d69-8d23-b5ada914c3a4","Type":"ContainerStarted","Data":"6875f3c4c04b067eb258f8d7eb9da2ee8a0440f53f31ddf08b512947f030e129"} Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.741513 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" event={"ID":"8ebc84bc-9ed7-40ce-a691-cd6c151debc2","Type":"ContainerStarted","Data":"a13bbf342b7976029e26ee1096ed2e0fcc20f6dbf33a54e074d05a871b9a259f"} Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.777878 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" podStartSLOduration=2.477154425 podStartE2EDuration="6.77785643s" podCreationTimestamp="2025-11-27 00:13:12 +0000 UTC" firstStartedPulling="2025-11-27 00:13:13.699723043 +0000 UTC m=+6722.389957953" lastFinishedPulling="2025-11-27 00:13:18.000425038 +0000 UTC m=+6726.690659958" observedRunningTime="2025-11-27 00:13:18.764817221 +0000 UTC m=+6727.455052141" watchObservedRunningTime="2025-11-27 00:13:18.77785643 +0000 UTC m=+6727.468091340" Nov 27 00:13:18 crc kubenswrapper[4903]: 
I1127 00:13:18.789450 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-w4kwb" podStartSLOduration=2.993982371 podStartE2EDuration="7.789429358s" podCreationTimestamp="2025-11-27 00:13:11 +0000 UTC" firstStartedPulling="2025-11-27 00:13:13.203865322 +0000 UTC m=+6721.894100232" lastFinishedPulling="2025-11-27 00:13:17.999312309 +0000 UTC m=+6726.689547219" observedRunningTime="2025-11-27 00:13:18.781470776 +0000 UTC m=+6727.471705696" watchObservedRunningTime="2025-11-27 00:13:18.789429358 +0000 UTC m=+6727.479664268" Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.855301 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 27 00:13:18 crc kubenswrapper[4903]: I1127 00:13:18.856287 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" podUID="872167df-4435-42c4-9503-8bfca809574f" containerName="nmstate-console-plugin" containerID="cri-o://d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db" gracePeriod=30 Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.521488 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.614562 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxpj2\" (UniqueName: \"kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2\") pod \"872167df-4435-42c4-9503-8bfca809574f\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.614617 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") pod \"872167df-4435-42c4-9503-8bfca809574f\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.614742 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf\") pod \"872167df-4435-42c4-9503-8bfca809574f\" (UID: \"872167df-4435-42c4-9503-8bfca809574f\") " Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.624925 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2" (OuterVolumeSpecName: "kube-api-access-jxpj2") pod "872167df-4435-42c4-9503-8bfca809574f" (UID: "872167df-4435-42c4-9503-8bfca809574f"). InnerVolumeSpecName "kube-api-access-jxpj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.628997 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert" (OuterVolumeSpecName: "plugin-serving-cert") pod "872167df-4435-42c4-9503-8bfca809574f" (UID: "872167df-4435-42c4-9503-8bfca809574f"). InnerVolumeSpecName "plugin-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.683264 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf" (OuterVolumeSpecName: "nginx-conf") pod "872167df-4435-42c4-9503-8bfca809574f" (UID: "872167df-4435-42c4-9503-8bfca809574f"). InnerVolumeSpecName "nginx-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.719154 4903 reconciler_common.go:293] "Volume detached for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/872167df-4435-42c4-9503-8bfca809574f-nginx-conf\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.719191 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxpj2\" (UniqueName: \"kubernetes.io/projected/872167df-4435-42c4-9503-8bfca809574f-kube-api-access-jxpj2\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.719203 4903 reconciler_common.go:293] "Volume detached for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/872167df-4435-42c4-9503-8bfca809574f-plugin-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.781434 4903 generic.go:334] "Generic (PLEG): container finished" podID="872167df-4435-42c4-9503-8bfca809574f" containerID="d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db" exitCode=0 Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.781507 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" event={"ID":"872167df-4435-42c4-9503-8bfca809574f","Type":"ContainerDied","Data":"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db"} Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.781540 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" event={"ID":"872167df-4435-42c4-9503-8bfca809574f","Type":"ContainerDied","Data":"72888784a4e62f0f0001cb3dcf0f7b777c2a9c04d252f50e8c9abc09eb7934df"} Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.781560 4903 scope.go:117] "RemoveContainer" containerID="d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.781683 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.799941 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9g9bn" event={"ID":"da368d4d-3a8a-428e-9090-6fc0a7609bcd","Type":"ContainerStarted","Data":"d25b6267cacd2fa73ce7d03d852ccdda8a092c997b51ed5f132633578ff3358b"} Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.800500 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.826153 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-9g9bn" podStartSLOduration=2.6429235699999998 podStartE2EDuration="7.826133142s" podCreationTimestamp="2025-11-27 00:13:12 +0000 UTC" firstStartedPulling="2025-11-27 00:13:12.816522368 +0000 UTC m=+6721.506757268" lastFinishedPulling="2025-11-27 00:13:17.99973193 +0000 UTC m=+6726.689966840" observedRunningTime="2025-11-27 00:13:19.817838352 +0000 UTC m=+6728.508073252" watchObservedRunningTime="2025-11-27 00:13:19.826133142 +0000 UTC m=+6728.516368052" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.847764 4903 scope.go:117] "RemoveContainer" containerID="d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db" Nov 27 00:13:19 crc kubenswrapper[4903]: E1127 00:13:19.848872 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db\": container with ID starting with d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db not found: ID does not exist" containerID="d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.848917 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db"} err="failed to get container status \"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db\": rpc error: code = NotFound desc = could not find container \"d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db\": container with ID starting with d32923fa933882b237b590921fbe0a355873936b4db704f4b43eb9ecea7f10db not found: ID does not exist" Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.877488 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 27 00:13:19 crc kubenswrapper[4903]: I1127 00:13:19.924249 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-hpzbl"] Nov 27 00:13:20 crc kubenswrapper[4903]: I1127 00:13:20.069795 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="872167df-4435-42c4-9503-8bfca809574f" path="/var/lib/kubelet/pods/872167df-4435-42c4-9503-8bfca809574f/volumes" Nov 27 00:13:27 crc kubenswrapper[4903]: I1127 00:13:27.634395 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-9g9bn" Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.007184 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" 
event={"ID":"8ebc84bc-9ed7-40ce-a691-cd6c151debc2","Type":"ContainerStarted","Data":"67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07"} Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.007814 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.018026 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" event={"ID":"869e4577-d4a9-4f47-9010-6bea93d0e841","Type":"ContainerStarted","Data":"0e97a2671ffb315d282e4a36fd3d80ae03efd03f0ed878a45c59ad070ce98663"} Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.024966 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" event={"ID":"d4705259-0dd2-4374-b6be-3ac6e57ae8f6","Type":"ContainerStarted","Data":"1efcfc641b7ff7199b6027116cf717ef5cdaced6e59ab55a37c130db14601542"} Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.027161 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.058065 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" podStartSLOduration=3.036660125 podStartE2EDuration="13.058047086s" podCreationTimestamp="2025-11-27 00:13:17 +0000 UTC" firstStartedPulling="2025-11-27 00:13:18.646288306 +0000 UTC m=+6727.336523216" lastFinishedPulling="2025-11-27 00:13:28.667675267 +0000 UTC m=+6737.357910177" observedRunningTime="2025-11-27 00:13:30.033151381 +0000 UTC m=+6738.723386291" watchObservedRunningTime="2025-11-27 00:13:30.058047086 +0000 UTC m=+6738.748281996" Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.065579 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-zhc4c" podStartSLOduration=2.8932059089999997 podStartE2EDuration="17.065561247s" podCreationTimestamp="2025-11-27 00:13:13 +0000 UTC" firstStartedPulling="2025-11-27 00:13:14.462434611 +0000 UTC m=+6723.152669521" lastFinishedPulling="2025-11-27 00:13:28.634789949 +0000 UTC m=+6737.325024859" observedRunningTime="2025-11-27 00:13:30.053275999 +0000 UTC m=+6738.743510909" watchObservedRunningTime="2025-11-27 00:13:30.065561247 +0000 UTC m=+6738.755796157" Nov 27 00:13:30 crc kubenswrapper[4903]: I1127 00:13:30.094097 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" podStartSLOduration=4.028024723 podStartE2EDuration="14.094080008s" podCreationTimestamp="2025-11-27 00:13:16 +0000 UTC" firstStartedPulling="2025-11-27 00:13:18.567484071 +0000 UTC m=+6727.257718981" lastFinishedPulling="2025-11-27 00:13:28.633539356 +0000 UTC m=+6737.323774266" observedRunningTime="2025-11-27 00:13:30.071428993 +0000 UTC m=+6738.761663903" watchObservedRunningTime="2025-11-27 00:13:30.094080008 +0000 UTC m=+6738.784314918" Nov 27 00:13:31 crc kubenswrapper[4903]: I1127 00:13:31.982008 4903 patch_prober.go:28] interesting pod/machine-config-daemon-wjwph container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 27 00:13:31 
crc kubenswrapper[4903]: I1127 00:13:31.982257 4903 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 27 00:13:31 crc kubenswrapper[4903]: I1127 00:13:31.982298 4903 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" Nov 27 00:13:31 crc kubenswrapper[4903]: I1127 00:13:31.993051 4903 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5"} pod="openshift-machine-config-operator/machine-config-daemon-wjwph" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 27 00:13:31 crc kubenswrapper[4903]: I1127 00:13:31.993142 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerName="machine-config-daemon" containerID="cri-o://c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" gracePeriod=600 Nov 27 00:13:32 crc kubenswrapper[4903]: E1127 00:13:32.143817 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:13:32 crc kubenswrapper[4903]: I1127 00:13:32.598426 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:13:32 crc kubenswrapper[4903]: I1127 00:13:32.881762 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:13:32 crc kubenswrapper[4903]: I1127 00:13:32.894818 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:13:32 crc kubenswrapper[4903]: I1127 00:13:32.918164 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-445k7" Nov 27 00:13:32 crc kubenswrapper[4903]: I1127 00:13:32.926730 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.094424 4903 generic.go:334] "Generic (PLEG): container finished" podID="232b7aad-b4bd-495a-a411-0cfd48fa372c" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" exitCode=0 Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.094468 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerDied","Data":"c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5"} Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.094502 4903 scope.go:117] "RemoveContainer" containerID="084380461b71e9cb28e7b6a51fa622a5e26c01262b544c1e57111d81686a4f65" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.095384 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:13:33 crc kubenswrapper[4903]: E1127 00:13:33.095959 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.190116 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/util/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.190836 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/pull/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.276388 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8htf7z_ed8d8216-366b-44a5-b2fd-0b3ad381efc9/extract/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.444029 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/util/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.672219 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/pull/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.694182 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/pull/0.log" Nov 27 00:13:33 crc kubenswrapper[4903]: I1127 00:13:33.724859 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/util/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.310918 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/util/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.346938 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/extract/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.347757 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fjjxmr_20425297-6df8-4b47-bade-0dd5e5539827/pull/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.610013 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.857858 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.909267 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:13:34 crc kubenswrapper[4903]: I1127 00:13:34.924969 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.159016 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/pull/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.229936 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/extract/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.242032 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772el74v8_7fbfeea9-c10d-4c3e-8c6d-65458de4aa9f/util/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.430365 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.594941 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.649839 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.694955 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.903625 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/pull/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.904332 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/util/0.log" Nov 27 00:13:35 crc kubenswrapper[4903]: I1127 00:13:35.944838 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921082v85_b3a3d04f-4e15-4207-ab86-0a9c7f6da454/extract/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.127730 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.438784 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.450024 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.492568 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.669964 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/util/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.726672 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/pull/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.779728 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fl9j75_ff6ba428-63b4-4a8d-9b52-9d7dd77d0430/extract/0.log" Nov 27 00:13:36 crc kubenswrapper[4903]: I1127 00:13:36.784184 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/util/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.104396 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/pull/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.111708 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/util/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.173489 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/pull/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.392938 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/util/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.504677 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/extract/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.549151 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.651083 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83zp7gr_cad43621-97ed-4d84-85dc-482bafe37257/pull/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.697795 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.724214 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.724230 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:13:37 crc kubenswrapper[4903]: I1127 00:13:37.953761 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-utilities/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.036211 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/extract-content/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.325324 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.371251 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-npsdr_33cd040b-fe08-424c-a1af-62df1ed45ad4/registry-server/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.491185 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.508934 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.564419 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: I1127 00:13:38.881945 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-utilities/0.log" Nov 27 00:13:38 crc kubenswrapper[4903]: 
I1127 00:13:38.922176 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.072852 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/extract-content/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.356036 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.367424 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.395338 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.562981 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wr586_b8dd7646-3929-498a-bfbd-40857a75e6fb/registry-server/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.663437 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/util/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.714259 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/pull/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.721523 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6wzvvf_0e3ab50d-4ada-420a-b14c-3bcd20623c58/extract/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.824918 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-f9lv7_ad2be713-f117-46a7-a491-d75a9564cd48/marketplace-operator/0.log" Nov 27 00:13:39 crc kubenswrapper[4903]: I1127 00:13:39.977069 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.150986 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.217589 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.259020 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.505113 4903 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-content/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.551035 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/extract-utilities/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.553641 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-utilities/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.825992 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-qldpj_f8679d09-9456-47f4-98b9-db03a62c2224/registry-server/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.867194 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-utilities/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.877759 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-content/0.log" Nov 27 00:13:40 crc kubenswrapper[4903]: I1127 00:13:40.919333 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-content/0.log" Nov 27 00:13:41 crc kubenswrapper[4903]: I1127 00:13:41.087206 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-utilities/0.log" Nov 27 00:13:41 crc kubenswrapper[4903]: I1127 00:13:41.125187 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/extract-content/0.log" Nov 27 00:13:41 crc kubenswrapper[4903]: I1127 00:13:41.187653 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-qssnm_aacb9593-5b91-4faf-9bc1-2021c35ca0e5/registry-server/0.log" Nov 27 00:13:47 crc kubenswrapper[4903]: I1127 00:13:47.029545 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:13:47 crc kubenswrapper[4903]: E1127 00:13:47.030441 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:13:47 crc kubenswrapper[4903]: I1127 00:13:47.567759 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:13:47 crc kubenswrapper[4903]: I1127 00:13:47.649032 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9"] Nov 27 00:13:47 crc kubenswrapper[4903]: I1127 00:13:47.649289 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" 
podUID="1c731f8b-9333-4076-b193-54255a31e938" containerName="webhook-server" containerID="cri-o://3eab262027c4c654102e5726929d85c95b01615b65d801ad2c3c92b32fb12cb4" gracePeriod=2 Nov 27 00:13:47 crc kubenswrapper[4903]: I1127 00:13:47.674777 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9"] Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.296218 4903 generic.go:334] "Generic (PLEG): container finished" podID="1c731f8b-9333-4076-b193-54255a31e938" containerID="3eab262027c4c654102e5726929d85c95b01615b65d801ad2c3c92b32fb12cb4" exitCode=0 Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.499455 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.688221 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45zff\" (UniqueName: \"kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff\") pod \"1c731f8b-9333-4076-b193-54255a31e938\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.688272 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert\") pod \"1c731f8b-9333-4076-b193-54255a31e938\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.688455 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert\") pod \"1c731f8b-9333-4076-b193-54255a31e938\" (UID: \"1c731f8b-9333-4076-b193-54255a31e938\") " Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.696220 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "1c731f8b-9333-4076-b193-54255a31e938" (UID: "1c731f8b-9333-4076-b193-54255a31e938"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.696238 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff" (OuterVolumeSpecName: "kube-api-access-45zff") pod "1c731f8b-9333-4076-b193-54255a31e938" (UID: "1c731f8b-9333-4076-b193-54255a31e938"). InnerVolumeSpecName "kube-api-access-45zff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.708639 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "1c731f8b-9333-4076-b193-54255a31e938" (UID: "1c731f8b-9333-4076-b193-54255a31e938"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.791187 4903 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.791463 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45zff\" (UniqueName: \"kubernetes.io/projected/1c731f8b-9333-4076-b193-54255a31e938-kube-api-access-45zff\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:48 crc kubenswrapper[4903]: I1127 00:13:48.791480 4903 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1c731f8b-9333-4076-b193-54255a31e938-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:13:49 crc kubenswrapper[4903]: I1127 00:13:49.308364 4903 scope.go:117] "RemoveContainer" containerID="3eab262027c4c654102e5726929d85c95b01615b65d801ad2c3c92b32fb12cb4" Nov 27 00:13:49 crc kubenswrapper[4903]: I1127 00:13:49.308548 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d76ff59f5-8bdc9" Nov 27 00:13:50 crc kubenswrapper[4903]: I1127 00:13:50.048867 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c731f8b-9333-4076-b193-54255a31e938" path="/var/lib/kubelet/pods/1c731f8b-9333-4076-b193-54255a31e938/volumes" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.994634 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:13:54 crc kubenswrapper[4903]: E1127 00:13:54.995580 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="872167df-4435-42c4-9503-8bfca809574f" containerName="nmstate-console-plugin" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.995597 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="872167df-4435-42c4-9503-8bfca809574f" containerName="nmstate-console-plugin" Nov 27 00:13:54 crc kubenswrapper[4903]: E1127 00:13:54.995618 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c731f8b-9333-4076-b193-54255a31e938" containerName="webhook-server" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.995625 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c731f8b-9333-4076-b193-54255a31e938" containerName="webhook-server" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.996007 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c731f8b-9333-4076-b193-54255a31e938" containerName="webhook-server" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.996023 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="872167df-4435-42c4-9503-8bfca809574f" containerName="nmstate-console-plugin" Nov 27 00:13:54 crc kubenswrapper[4903]: I1127 00:13:54.999646 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.023307 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.130319 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.130832 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.131227 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64qb9\" (UniqueName: \"kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.233632 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.233777 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64qb9\" (UniqueName: \"kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.233818 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.234157 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.234240 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.260095 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-64qb9\" (UniqueName: \"kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9\") pod \"certified-operators-lkc95\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.333079 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:13:55 crc kubenswrapper[4903]: I1127 00:13:55.895880 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.351460 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-kfn8d_5127cf5c-29a6-484d-9e1c-895e2bb109e3/prometheus-operator/0.log" Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.423037 4903 generic.go:334] "Generic (PLEG): container finished" podID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerID="1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9" exitCode=0 Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.423083 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerDied","Data":"1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9"} Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.423132 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerStarted","Data":"adfc35ad08396e0a668a8787c7ca2a018b33f742dbd973483a6c13950aca1868"} Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.459670 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-594b6c89d9-56j5b_68d1419a-288f-4fcb-9d4d-8f9568fa2170/prometheus-operator-admission-webhook/0.log" Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.543840 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-594b6c89d9-jw268_7674d75c-8272-4f53-86fe-3fb83d421c63/prometheus-operator-admission-webhook/0.log" Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.721423 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-6gzsx_c3c5c9e5-d4c4-4fcd-8c0c-dee6f7ce6471/operator/0.log" Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.752673 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-ccgq6_dff09e4b-a38e-43fa-8394-e6922e356c4d/observability-ui-dashboards/0.log" Nov 27 00:13:56 crc kubenswrapper[4903]: I1127 00:13:56.931458 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-clswb_193e7d1c-0f98-4013-aad9-16711a00ab2e/perses-operator/0.log" Nov 27 00:13:58 crc kubenswrapper[4903]: I1127 00:13:58.443715 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerStarted","Data":"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305"} Nov 27 00:14:00 crc kubenswrapper[4903]: I1127 00:14:00.029875 4903 scope.go:117] 
"RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:14:00 crc kubenswrapper[4903]: E1127 00:14:00.030568 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:14:00 crc kubenswrapper[4903]: I1127 00:14:00.469064 4903 generic.go:334] "Generic (PLEG): container finished" podID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerID="303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305" exitCode=0 Nov 27 00:14:00 crc kubenswrapper[4903]: I1127 00:14:00.469114 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerDied","Data":"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305"} Nov 27 00:14:01 crc kubenswrapper[4903]: I1127 00:14:01.482232 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerStarted","Data":"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0"} Nov 27 00:14:01 crc kubenswrapper[4903]: I1127 00:14:01.503882 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lkc95" podStartSLOduration=2.899756567 podStartE2EDuration="7.503859375s" podCreationTimestamp="2025-11-27 00:13:54 +0000 UTC" firstStartedPulling="2025-11-27 00:13:56.425069001 +0000 UTC m=+6765.115303911" lastFinishedPulling="2025-11-27 00:14:01.029171809 +0000 UTC m=+6769.719406719" observedRunningTime="2025-11-27 00:14:01.498887623 +0000 UTC m=+6770.189122573" watchObservedRunningTime="2025-11-27 00:14:01.503859375 +0000 UTC m=+6770.194094285" Nov 27 00:14:05 crc kubenswrapper[4903]: I1127 00:14:05.333867 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:05 crc kubenswrapper[4903]: I1127 00:14:05.334260 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:06 crc kubenswrapper[4903]: I1127 00:14:06.380680 4903 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-lkc95" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="registry-server" probeResult="failure" output=< Nov 27 00:14:06 crc kubenswrapper[4903]: timeout: failed to connect service ":50051" within 1s Nov 27 00:14:06 crc kubenswrapper[4903]: > Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.212250 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.297903 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.298114 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" 
podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" containerID="cri-o://ca84a094eae0897716516c2c819744e6ec3daf1c9d41e3ba2a9b4082f4689bcf" gracePeriod=10 Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.564327 4903 generic.go:334] "Generic (PLEG): container finished" podID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerID="ca84a094eae0897716516c2c819744e6ec3daf1c9d41e3ba2a9b4082f4689bcf" exitCode=0 Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.564366 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerDied","Data":"ca84a094eae0897716516c2c819744e6ec3daf1c9d41e3ba2a9b4082f4689bcf"} Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.564649 4903 scope.go:117] "RemoveContainer" containerID="00fb6c317c57413346dffe0c0ddda07a4f2a2c61d9ffd456f78c2ab1f561e73f" Nov 27 00:14:07 crc kubenswrapper[4903]: I1127 00:14:07.929199 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.028383 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert\") pod \"b5900302-4880-4732-a477-8ed6cf3bfec3\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.028553 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtqhg\" (UniqueName: \"kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg\") pod \"b5900302-4880-4732-a477-8ed6cf3bfec3\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.028745 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert\") pod \"b5900302-4880-4732-a477-8ed6cf3bfec3\" (UID: \"b5900302-4880-4732-a477-8ed6cf3bfec3\") " Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.034776 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "b5900302-4880-4732-a477-8ed6cf3bfec3" (UID: "b5900302-4880-4732-a477-8ed6cf3bfec3"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.047270 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "b5900302-4880-4732-a477-8ed6cf3bfec3" (UID: "b5900302-4880-4732-a477-8ed6cf3bfec3"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.056944 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg" (OuterVolumeSpecName: "kube-api-access-gtqhg") pod "b5900302-4880-4732-a477-8ed6cf3bfec3" (UID: "b5900302-4880-4732-a477-8ed6cf3bfec3"). InnerVolumeSpecName "kube-api-access-gtqhg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.132189 4903 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.132225 4903 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b5900302-4880-4732-a477-8ed6cf3bfec3-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.132234 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtqhg\" (UniqueName: \"kubernetes.io/projected/b5900302-4880-4732-a477-8ed6cf3bfec3-kube-api-access-gtqhg\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.577053 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" event={"ID":"b5900302-4880-4732-a477-8ed6cf3bfec3","Type":"ContainerDied","Data":"035c72852c6a323f0f57805549b0d7d924b7db9e5c36cc483f667179d57dbb73"} Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.577106 4903 scope.go:117] "RemoveContainer" containerID="ca84a094eae0897716516c2c819744e6ec3daf1c9d41e3ba2a9b4082f4689bcf" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.577138 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb" Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.611480 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 27 00:14:08 crc kubenswrapper[4903]: I1127 00:14:08.627764 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57594f7c4c-gdzqb"] Nov 27 00:14:10 crc kubenswrapper[4903]: I1127 00:14:10.045497 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" path="/var/lib/kubelet/pods/b5900302-4880-4732-a477-8ed6cf3bfec3/volumes" Nov 27 00:14:11 crc kubenswrapper[4903]: I1127 00:14:11.771443 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/kube-rbac-proxy/0.log" Nov 27 00:14:11 crc kubenswrapper[4903]: I1127 00:14:11.846201 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/1.log" Nov 27 00:14:11 crc kubenswrapper[4903]: I1127 00:14:11.871769 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-5c85bfb685-pwxll_a9cea7ef-9976-4fe6-bc99-bd4a71c8ee7b/manager/0.log" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.028629 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:14:15 crc kubenswrapper[4903]: E1127 00:14:15.029399 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.411535 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.482187 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.508964 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg"] Nov 27 00:14:15 crc kubenswrapper[4903]: E1127 00:14:15.509632 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.509656 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: E1127 00:14:15.509668 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.509675 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: E1127 00:14:15.509726 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.509736 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: E1127 00:14:15.509757 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.509765 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.510032 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.510069 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.511107 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.530330 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg"] Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.612081 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-apiservice-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.612195 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-webhook-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.612262 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb58c\" (UniqueName: \"kubernetes.io/projected/0d17fa55-5782-4674-bc08-1c264223c89a-kube-api-access-wb58c\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.714288 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-apiservice-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.714341 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-webhook-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.714371 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb58c\" (UniqueName: \"kubernetes.io/projected/0d17fa55-5782-4674-bc08-1c264223c89a-kube-api-access-wb58c\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.731417 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-apiservice-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.734357 4903 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0d17fa55-5782-4674-bc08-1c264223c89a-webhook-cert\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.763616 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx"] Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.764457 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.765508 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.771486 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb58c\" (UniqueName: \"kubernetes.io/projected/0d17fa55-5782-4674-bc08-1c264223c89a-kube-api-access-wb58c\") pod \"metallb-operator-controller-manager-5cf6d5447f-sb6mg\" (UID: \"0d17fa55-5782-4674-bc08-1c264223c89a\") " pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.790895 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx"] Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.816748 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.818185 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snptf\" (UniqueName: \"kubernetes.io/projected/ac383c08-a746-47db-89d9-228585d98c8d-kube-api-access-snptf\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.818380 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-webhook-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.818511 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-apiservice-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.836523 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.923578 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-apiservice-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.924032 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snptf\" (UniqueName: \"kubernetes.io/projected/ac383c08-a746-47db-89d9-228585d98c8d-kube-api-access-snptf\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.924140 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-webhook-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.931458 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-webhook-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.931842 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ac383c08-a746-47db-89d9-228585d98c8d-apiservice-cert\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:15 crc kubenswrapper[4903]: I1127 00:14:15.943853 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snptf\" (UniqueName: \"kubernetes.io/projected/ac383c08-a746-47db-89d9-228585d98c8d-kube-api-access-snptf\") pod \"metallb-operator-webhook-server-c6fb8fd-gbdfx\" (UID: \"ac383c08-a746-47db-89d9-228585d98c8d\") " pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:16 crc kubenswrapper[4903]: I1127 00:14:16.045996 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:16 crc kubenswrapper[4903]: W1127 00:14:16.644177 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d17fa55_5782_4674_bc08_1c264223c89a.slice/crio-291ce7e06936c1ec5b22e68642972903e9d53f09d0d25e5651c8c8cabb7314ad WatchSource:0}: Error finding container 291ce7e06936c1ec5b22e68642972903e9d53f09d0d25e5651c8c8cabb7314ad: Status 404 returned error can't find the container with id 291ce7e06936c1ec5b22e68642972903e9d53f09d0d25e5651c8c8cabb7314ad Nov 27 00:14:16 crc kubenswrapper[4903]: I1127 00:14:16.646002 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg"] Nov 27 00:14:16 crc kubenswrapper[4903]: I1127 00:14:16.692496 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lkc95" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="registry-server" containerID="cri-o://d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0" gracePeriod=2 Nov 27 00:14:16 crc kubenswrapper[4903]: I1127 00:14:16.692767 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" event={"ID":"0d17fa55-5782-4674-bc08-1c264223c89a","Type":"ContainerStarted","Data":"291ce7e06936c1ec5b22e68642972903e9d53f09d0d25e5651c8c8cabb7314ad"} Nov 27 00:14:16 crc kubenswrapper[4903]: I1127 00:14:16.731377 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx"] Nov 27 00:14:16 crc kubenswrapper[4903]: W1127 00:14:16.742196 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac383c08_a746_47db_89d9_228585d98c8d.slice/crio-a7a45a0241771c6ada78eee06ccef32ca973c475c6c1dcf9284f5daa09742887 WatchSource:0}: Error finding container a7a45a0241771c6ada78eee06ccef32ca973c475c6c1dcf9284f5daa09742887: Status 404 returned error can't find the container with id a7a45a0241771c6ada78eee06ccef32ca973c475c6c1dcf9284f5daa09742887 Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.271459 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.375358 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content\") pod \"87524cd1-8243-49b5-a64f-27d55fbdc585\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.375568 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities\") pod \"87524cd1-8243-49b5-a64f-27d55fbdc585\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.375857 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64qb9\" (UniqueName: \"kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9\") pod \"87524cd1-8243-49b5-a64f-27d55fbdc585\" (UID: \"87524cd1-8243-49b5-a64f-27d55fbdc585\") " Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.376456 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities" (OuterVolumeSpecName: "utilities") pod "87524cd1-8243-49b5-a64f-27d55fbdc585" (UID: "87524cd1-8243-49b5-a64f-27d55fbdc585"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.376762 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-utilities\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.382425 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9" (OuterVolumeSpecName: "kube-api-access-64qb9") pod "87524cd1-8243-49b5-a64f-27d55fbdc585" (UID: "87524cd1-8243-49b5-a64f-27d55fbdc585"). InnerVolumeSpecName "kube-api-access-64qb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.431347 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87524cd1-8243-49b5-a64f-27d55fbdc585" (UID: "87524cd1-8243-49b5-a64f-27d55fbdc585"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.480318 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64qb9\" (UniqueName: \"kubernetes.io/projected/87524cd1-8243-49b5-a64f-27d55fbdc585-kube-api-access-64qb9\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.480358 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87524cd1-8243-49b5-a64f-27d55fbdc585-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.709791 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" event={"ID":"0d17fa55-5782-4674-bc08-1c264223c89a","Type":"ContainerStarted","Data":"71f7a19f4a637022a5873b3e30c27b6dc1f6683bb4cea945b48b991f8315985b"} Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.710012 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.712925 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" event={"ID":"ac383c08-a746-47db-89d9-228585d98c8d","Type":"ContainerStarted","Data":"dd45d060998f4672436fe6db85c4f2dab029b120b107862732d2adf59207ebe3"} Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.713242 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" event={"ID":"ac383c08-a746-47db-89d9-228585d98c8d","Type":"ContainerStarted","Data":"a7a45a0241771c6ada78eee06ccef32ca973c475c6c1dcf9284f5daa09742887"} Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.713940 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.719422 4903 generic.go:334] "Generic (PLEG): container finished" podID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerID="d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0" exitCode=0 Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.719479 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerDied","Data":"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0"} Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.719492 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lkc95" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.719517 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lkc95" event={"ID":"87524cd1-8243-49b5-a64f-27d55fbdc585","Type":"ContainerDied","Data":"adfc35ad08396e0a668a8787c7ca2a018b33f742dbd973483a6c13950aca1868"} Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.719538 4903 scope.go:117] "RemoveContainer" containerID="d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.759528 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" podStartSLOduration=2.759509986 podStartE2EDuration="2.759509986s" podCreationTimestamp="2025-11-27 00:14:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:14:17.744432424 +0000 UTC m=+6786.434667334" watchObservedRunningTime="2025-11-27 00:14:17.759509986 +0000 UTC m=+6786.449744896" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.766199 4903 scope.go:117] "RemoveContainer" containerID="303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.798337 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" podStartSLOduration=2.798320343 podStartE2EDuration="2.798320343s" podCreationTimestamp="2025-11-27 00:14:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:14:17.767155141 +0000 UTC m=+6786.457390071" watchObservedRunningTime="2025-11-27 00:14:17.798320343 +0000 UTC m=+6786.488555253" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.838826 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.850118 4903 scope.go:117] "RemoveContainer" containerID="1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.857668 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lkc95"] Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.874631 4903 scope.go:117] "RemoveContainer" containerID="d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0" Nov 27 00:14:17 crc kubenswrapper[4903]: E1127 00:14:17.875109 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0\": container with ID starting with d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0 not found: ID does not exist" containerID="d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.875179 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0"} err="failed to get container status \"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0\": rpc error: code = NotFound desc = could not find container 
\"d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0\": container with ID starting with d0ad6806e431115a2923521dad0f4d337f67fcdcd9b0141d73d81c9321a83dd0 not found: ID does not exist" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.875216 4903 scope.go:117] "RemoveContainer" containerID="303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305" Nov 27 00:14:17 crc kubenswrapper[4903]: E1127 00:14:17.875709 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305\": container with ID starting with 303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305 not found: ID does not exist" containerID="303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.875737 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305"} err="failed to get container status \"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305\": rpc error: code = NotFound desc = could not find container \"303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305\": container with ID starting with 303a1cddf924b80ae1753277e951b06c2fb4d5be8bb52f5427c6b4f58dfb6305 not found: ID does not exist" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.875758 4903 scope.go:117] "RemoveContainer" containerID="1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9" Nov 27 00:14:17 crc kubenswrapper[4903]: E1127 00:14:17.876019 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9\": container with ID starting with 1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9 not found: ID does not exist" containerID="1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9" Nov 27 00:14:17 crc kubenswrapper[4903]: I1127 00:14:17.876048 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9"} err="failed to get container status \"1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9\": rpc error: code = NotFound desc = could not find container \"1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9\": container with ID starting with 1b73d6cbeadb32e1ea272999304d537ea64fe57d60193a97477e25732aa8c7b9 not found: ID does not exist" Nov 27 00:14:18 crc kubenswrapper[4903]: I1127 00:14:18.042365 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" path="/var/lib/kubelet/pods/87524cd1-8243-49b5-a64f-27d55fbdc585/volumes" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.790830 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/frr-k8s-qxrcx"] Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.791742 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="controller" containerID="cri-o://f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.792085 4903 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy" containerID="cri-o://fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.792224 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr-metrics" containerID="cri-o://35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.792269 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="reloader" containerID="cri-o://ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.792278 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy-frr" containerID="cri-o://c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.792305 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-qxrcx" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr" containerID="cri-o://f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" gracePeriod=2 Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.806796 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/frr-k8s-qxrcx"] Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.878983 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q"] Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879494 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-metrics" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879506 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-metrics" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879528 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="extract-content" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879533 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="extract-content" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879558 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="registry-server" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879564 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="registry-server" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879577 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879583 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 
00:14:26.879598 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-reloader" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879605 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-reloader" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879621 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-frr-files" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879626 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="cp-frr-files" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879646 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr-metrics" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879653 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr-metrics" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879666 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="extract-utilities" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879672 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="extract-utilities" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879681 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="reloader" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879687 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="reloader" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879716 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="controller" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879722 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="controller" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879739 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy-frr" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879746 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy-frr" Nov 27 00:14:26 crc kubenswrapper[4903]: E1127 00:14:26.879766 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879773 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879964 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.879986 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5900302-4880-4732-a477-8ed6cf3bfec3" containerName="manager" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 
00:14:26.879999 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="reloader" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880011 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr-metrics" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880025 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="kube-rbac-proxy-frr" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880038 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="frr" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880053 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="87524cd1-8243-49b5-a64f-27d55fbdc585" containerName="registry-server" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880062 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerName="controller" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.880847 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.918770 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q"] Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.969021 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-7mw6b"] Nov 27 00:14:26 crc kubenswrapper[4903]: I1127 00:14:26.987786 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.017586 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/speaker-f2g89"] Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.017862 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/speaker-f2g89" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="speaker" containerID="cri-o://0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" gracePeriod=2 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.018009 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/speaker-f2g89" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="kube-rbac-proxy" containerID="cri-o://dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" gracePeriod=2 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034710 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-conf\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034746 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-sockets\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034794 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-reloader\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034813 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-metrics\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034853 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e26a694c-d57e-4087-b30b-bca10278c77c-frr-startup\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034901 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vx28\" (UniqueName: \"kubernetes.io/projected/b4378de9-e941-4007-b3ae-8471c5ace362-kube-api-access-8vx28\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.034947 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l6zc\" (UniqueName: \"kubernetes.io/projected/e26a694c-d57e-4087-b30b-bca10278c77c-kube-api-access-5l6zc\") pod \"frr-k8s-7mw6b\" (UID: 
\"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.035037 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4378de9-e941-4007-b3ae-8471c5ace362-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.035063 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e26a694c-d57e-4087-b30b-bca10278c77c-metrics-certs\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.054607 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/speaker-f2g89"] Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.071239 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-5xfj8"] Nov 27 00:14:27 crc kubenswrapper[4903]: E1127 00:14:27.071821 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="speaker" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.071842 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="speaker" Nov 27 00:14:27 crc kubenswrapper[4903]: E1127 00:14:27.071903 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="kube-rbac-proxy" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.071910 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="kube-rbac-proxy" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.072107 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="kube-rbac-proxy" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.072133 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerName="speaker" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.073351 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.086210 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-bnlxc"] Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.089113 4903 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.135390 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5xfj8"] Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138271 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-reloader\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138316 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-metrics\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138352 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-memberlist\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138428 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-metrics-certs\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138513 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e26a694c-d57e-4087-b30b-bca10278c77c-frr-startup\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138666 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metrics-certs\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138722 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vx28\" (UniqueName: \"kubernetes.io/projected/b4378de9-e941-4007-b3ae-8471c5ace362-kube-api-access-8vx28\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138824 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l6zc\" (UniqueName: \"kubernetes.io/projected/e26a694c-d57e-4087-b30b-bca10278c77c-kube-api-access-5l6zc\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138901 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metallb-excludel2\") pod \"speaker-bnlxc\" 
(UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.138923 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27kqg\" (UniqueName: \"kubernetes.io/projected/de51f671-d120-440f-8e93-9862dafbca57-kube-api-access-27kqg\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139045 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmdzl\" (UniqueName: \"kubernetes.io/projected/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-kube-api-access-wmdzl\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139122 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4378de9-e941-4007-b3ae-8471c5ace362-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139229 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e26a694c-d57e-4087-b30b-bca10278c77c-metrics-certs\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139294 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-conf\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139327 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-sockets\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139367 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-cert\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.139838 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-reloader\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.140715 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-metrics\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.140914 4903 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-conf\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.141581 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e26a694c-d57e-4087-b30b-bca10278c77c-frr-startup\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.142590 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e26a694c-d57e-4087-b30b-bca10278c77c-frr-sockets\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.162723 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e26a694c-d57e-4087-b30b-bca10278c77c-metrics-certs\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.174324 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vx28\" (UniqueName: \"kubernetes.io/projected/b4378de9-e941-4007-b3ae-8471c5ace362-kube-api-access-8vx28\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.175829 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b4378de9-e941-4007-b3ae-8471c5ace362-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-m5t9q\" (UID: \"b4378de9-e941-4007-b3ae-8471c5ace362\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.180652 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l6zc\" (UniqueName: \"kubernetes.io/projected/e26a694c-d57e-4087-b30b-bca10278c77c-kube-api-access-5l6zc\") pod \"frr-k8s-7mw6b\" (UID: \"e26a694c-d57e-4087-b30b-bca10278c77c\") " pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241443 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmdzl\" (UniqueName: \"kubernetes.io/projected/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-kube-api-access-wmdzl\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241552 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-cert\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241641 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-memberlist\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 
27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241681 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-metrics-certs\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241788 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metrics-certs\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241890 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metallb-excludel2\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.241910 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27kqg\" (UniqueName: \"kubernetes.io/projected/de51f671-d120-440f-8e93-9862dafbca57-kube-api-access-27kqg\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.246046 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-cert\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.254830 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metallb-excludel2\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.255254 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de51f671-d120-440f-8e93-9862dafbca57-metrics-certs\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.256007 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-memberlist\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.259166 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-metrics-certs\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.263963 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmdzl\" (UniqueName: 
\"kubernetes.io/projected/55cc9b1f-3ec2-47c4-9ab2-1047f4b54117-kube-api-access-wmdzl\") pod \"speaker-bnlxc\" (UID: \"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117\") " pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.265014 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27kqg\" (UniqueName: \"kubernetes.io/projected/de51f671-d120-440f-8e93-9862dafbca57-kube-api-access-27kqg\") pod \"controller-f8648f98b-5xfj8\" (UID: \"de51f671-d120-440f-8e93-9862dafbca57\") " pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.323520 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.351068 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.365017 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.376758 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-bnlxc" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.385603 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qxrcx" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.550216 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.550636 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551063 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader" (OuterVolumeSpecName: "reloader") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "reloader". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551352 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551463 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5bhh\" (UniqueName: \"kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551635 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551685 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.551794 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets\") pod \"2406eb0a-073a-4339-98f2-baa11ceacaa4\" (UID: \"2406eb0a-073a-4339-98f2-baa11ceacaa4\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.552583 4903 reconciler_common.go:293] "Volume detached for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-reloader\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.553853 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets" (OuterVolumeSpecName: "frr-sockets") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "frr-sockets". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.553918 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf" (OuterVolumeSpecName: "frr-conf") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "frr-conf". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.554649 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup" (OuterVolumeSpecName: "frr-startup") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "frr-startup". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.555128 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.556322 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics" (OuterVolumeSpecName: "metrics") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.558189 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh" (OuterVolumeSpecName: "kube-api-access-t5bhh") pod "2406eb0a-073a-4339-98f2-baa11ceacaa4" (UID: "2406eb0a-073a-4339-98f2-baa11ceacaa4"). InnerVolumeSpecName "kube-api-access-t5bhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655346 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655373 4903 reconciler_common.go:293] "Volume detached for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-conf\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655382 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5bhh\" (UniqueName: \"kubernetes.io/projected/2406eb0a-073a-4339-98f2-baa11ceacaa4-kube-api-access-t5bhh\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655414 4903 reconciler_common.go:293] "Volume detached for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-startup\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655423 4903 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-metrics\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.655431 4903 reconciler_common.go:293] "Volume detached for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2406eb0a-073a-4339-98f2-baa11ceacaa4-frr-sockets\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.684748 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-f2g89" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.756677 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdllz\" (UniqueName: \"kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz\") pod \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.757034 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") pod \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.757206 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2\") pod \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.757343 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs\") pod \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\" (UID: \"61e82f3d-2aca-46e7-bd0f-12c8b492c14e\") " Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.761546 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2" (OuterVolumeSpecName: "metallb-excludel2") pod "61e82f3d-2aca-46e7-bd0f-12c8b492c14e" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e"). InnerVolumeSpecName "metallb-excludel2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.773337 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist" (OuterVolumeSpecName: "memberlist") pod "61e82f3d-2aca-46e7-bd0f-12c8b492c14e" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e"). InnerVolumeSpecName "memberlist". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.773364 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "61e82f3d-2aca-46e7-bd0f-12c8b492c14e" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.773434 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz" (OuterVolumeSpecName: "kube-api-access-gdllz") pod "61e82f3d-2aca-46e7-bd0f-12c8b492c14e" (UID: "61e82f3d-2aca-46e7-bd0f-12c8b492c14e"). InnerVolumeSpecName "kube-api-access-gdllz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.861125 4903 reconciler_common.go:293] "Volume detached for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metallb-excludel2\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.862548 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.862566 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdllz\" (UniqueName: \"kubernetes.io/projected/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-kube-api-access-gdllz\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.862576 4903 reconciler_common.go:293] "Volume detached for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/61e82f3d-2aca-46e7-bd0f-12c8b492c14e-memberlist\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.882347 4903 generic.go:334] "Generic (PLEG): container finished" podID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerID="dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.882395 4903 generic.go:334] "Generic (PLEG): container finished" podID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" containerID="0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.882485 4903 scope.go:117] "RemoveContainer" containerID="dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.882666 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-f2g89" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935493 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935525 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935532 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" exitCode=143 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935539 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935546 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" exitCode=143 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935552 4903 generic.go:334] "Generic (PLEG): container finished" podID="2406eb0a-073a-4339-98f2-baa11ceacaa4" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" exitCode=0 Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935900 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qxrcx" Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.935606 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936206 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936215 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936221 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936226 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936231 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936235 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} Nov 27 00:14:27 crc 
kubenswrapper[4903]: I1127 00:14:27.936241 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936246 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936256 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936262 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936267 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936272 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936277 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936283 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936287 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936293 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.936298 4903 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.943956 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"f2ab206abc3ffdb215ca236f7b97fe47d08fbbe595db9e9feb43f8235f740a20"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.946031 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q"] Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 00:14:27.946427 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-bnlxc" event={"ID":"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117","Type":"ContainerStarted","Data":"515de11aca12f2ed9078338ff65f6243c181de4d8dbbaf942bbe874fd8c715e1"} Nov 27 00:14:27 crc kubenswrapper[4903]: I1127 
00:14:27.969606 4903 scope.go:117] "RemoveContainer" containerID="0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.085464 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2406eb0a-073a-4339-98f2-baa11ceacaa4" path="/var/lib/kubelet/pods/2406eb0a-073a-4339-98f2-baa11ceacaa4/volumes" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.090495 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61e82f3d-2aca-46e7-bd0f-12c8b492c14e" path="/var/lib/kubelet/pods/61e82f3d-2aca-46e7-bd0f-12c8b492c14e/volumes" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.147048 4903 scope.go:117] "RemoveContainer" containerID="dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.147612 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568\": container with ID starting with dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568 not found: ID does not exist" containerID="dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.147647 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568"} err="failed to get container status \"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568\": rpc error: code = NotFound desc = could not find container \"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568\": container with ID starting with dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.147707 4903 scope.go:117] "RemoveContainer" containerID="0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.147979 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155\": container with ID starting with 0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155 not found: ID does not exist" containerID="0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.148017 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155"} err="failed to get container status \"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155\": rpc error: code = NotFound desc = could not find container \"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155\": container with ID starting with 0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.148030 4903 scope.go:117] "RemoveContainer" containerID="dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.149186 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568"} err="failed to get container status 
\"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568\": rpc error: code = NotFound desc = could not find container \"dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568\": container with ID starting with dd570f288494b8a28784fc29ad92ca1441a6c34103222206a251e5c21b586568 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.149209 4903 scope.go:117] "RemoveContainer" containerID="0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.150063 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155"} err="failed to get container status \"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155\": rpc error: code = NotFound desc = could not find container \"0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155\": container with ID starting with 0e960cb21fd013ca0d020ef86d33af21676c2ae93eef43e993196fd020c09155 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.150089 4903 scope.go:117] "RemoveContainer" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.181176 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5xfj8"] Nov 27 00:14:28 crc kubenswrapper[4903]: W1127 00:14:28.204827 4903 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde51f671_d120_440f_8e93_9862dafbca57.slice/crio-4e5f47e43d30b160cd97fd25f3bfeec882bc75235d9ed1e8c202049ec7158a08 WatchSource:0}: Error finding container 4e5f47e43d30b160cd97fd25f3bfeec882bc75235d9ed1e8c202049ec7158a08: Status 404 returned error can't find the container with id 4e5f47e43d30b160cd97fd25f3bfeec882bc75235d9ed1e8c202049ec7158a08 Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.230222 4903 scope.go:117] "RemoveContainer" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.270981 4903 scope.go:117] "RemoveContainer" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.373983 4903 scope.go:117] "RemoveContainer" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.466587 4903 scope.go:117] "RemoveContainer" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.507915 4903 scope.go:117] "RemoveContainer" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.578976 4903 scope.go:117] "RemoveContainer" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.628898 4903 scope.go:117] "RemoveContainer" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.663656 4903 scope.go:117] "RemoveContainer" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.714888 4903 scope.go:117] "RemoveContainer" 
containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.716051 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": container with ID starting with c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c not found: ID does not exist" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.716094 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} err="failed to get container status \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": rpc error: code = NotFound desc = could not find container \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": container with ID starting with c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.716121 4903 scope.go:117] "RemoveContainer" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.717198 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": container with ID starting with fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9 not found: ID does not exist" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.717278 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} err="failed to get container status \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": rpc error: code = NotFound desc = could not find container \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": container with ID starting with fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.717316 4903 scope.go:117] "RemoveContainer" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.717608 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": container with ID starting with 35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373 not found: ID does not exist" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.717636 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} err="failed to get container status \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": rpc error: code = NotFound desc = could not find container \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": container with ID starting with 
35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.717651 4903 scope.go:117] "RemoveContainer" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.718024 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": container with ID starting with ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52 not found: ID does not exist" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.718057 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} err="failed to get container status \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": rpc error: code = NotFound desc = could not find container \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": container with ID starting with ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.718084 4903 scope.go:117] "RemoveContainer" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.719367 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": container with ID starting with f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9 not found: ID does not exist" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.719390 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} err="failed to get container status \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": rpc error: code = NotFound desc = could not find container \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": container with ID starting with f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.719404 4903 scope.go:117] "RemoveContainer" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.719776 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": container with ID starting with f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb not found: ID does not exist" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.719802 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} err="failed to get container status \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": rpc 
error: code = NotFound desc = could not find container \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": container with ID starting with f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.719818 4903 scope.go:117] "RemoveContainer" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.726045 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": container with ID starting with 4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3 not found: ID does not exist" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.726094 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} err="failed to get container status \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": rpc error: code = NotFound desc = could not find container \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": container with ID starting with 4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.726123 4903 scope.go:117] "RemoveContainer" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.730883 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": container with ID starting with 18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06 not found: ID does not exist" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.730945 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} err="failed to get container status \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": rpc error: code = NotFound desc = could not find container \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": container with ID starting with 18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.730975 4903 scope.go:117] "RemoveContainer" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: E1127 00:14:28.731745 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": container with ID starting with 271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f not found: ID does not exist" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.731795 4903 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} err="failed to get container status \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": rpc error: code = NotFound desc = could not find container \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": container with ID starting with 271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.731850 4903 scope.go:117] "RemoveContainer" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.732193 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} err="failed to get container status \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": rpc error: code = NotFound desc = could not find container \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": container with ID starting with c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.732217 4903 scope.go:117] "RemoveContainer" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.732764 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} err="failed to get container status \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": rpc error: code = NotFound desc = could not find container \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": container with ID starting with fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.732789 4903 scope.go:117] "RemoveContainer" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733074 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} err="failed to get container status \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": rpc error: code = NotFound desc = could not find container \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": container with ID starting with 35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733103 4903 scope.go:117] "RemoveContainer" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733411 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} err="failed to get container status \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": rpc error: code = NotFound desc = could not find container \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": container with ID starting with ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52 not found: ID does not exist" Nov 
27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733448 4903 scope.go:117] "RemoveContainer" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733657 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} err="failed to get container status \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": rpc error: code = NotFound desc = could not find container \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": container with ID starting with f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.733675 4903 scope.go:117] "RemoveContainer" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734026 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} err="failed to get container status \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": rpc error: code = NotFound desc = could not find container \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": container with ID starting with f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734044 4903 scope.go:117] "RemoveContainer" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734249 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} err="failed to get container status \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": rpc error: code = NotFound desc = could not find container \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": container with ID starting with 4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734266 4903 scope.go:117] "RemoveContainer" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734511 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} err="failed to get container status \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": rpc error: code = NotFound desc = could not find container \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": container with ID starting with 18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734530 4903 scope.go:117] "RemoveContainer" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734893 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} err="failed to get container status 
\"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": rpc error: code = NotFound desc = could not find container \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": container with ID starting with 271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.734915 4903 scope.go:117] "RemoveContainer" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735243 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} err="failed to get container status \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": rpc error: code = NotFound desc = could not find container \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": container with ID starting with c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735261 4903 scope.go:117] "RemoveContainer" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735567 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} err="failed to get container status \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": rpc error: code = NotFound desc = could not find container \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": container with ID starting with fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735586 4903 scope.go:117] "RemoveContainer" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735862 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} err="failed to get container status \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": rpc error: code = NotFound desc = could not find container \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": container with ID starting with 35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.735879 4903 scope.go:117] "RemoveContainer" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.736083 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} err="failed to get container status \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": rpc error: code = NotFound desc = could not find container \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": container with ID starting with ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.736101 4903 scope.go:117] "RemoveContainer" 
containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.737452 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} err="failed to get container status \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": rpc error: code = NotFound desc = could not find container \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": container with ID starting with f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.737493 4903 scope.go:117] "RemoveContainer" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.737859 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} err="failed to get container status \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": rpc error: code = NotFound desc = could not find container \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": container with ID starting with f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.737881 4903 scope.go:117] "RemoveContainer" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.738123 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} err="failed to get container status \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": rpc error: code = NotFound desc = could not find container \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": container with ID starting with 4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.738160 4903 scope.go:117] "RemoveContainer" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.738350 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} err="failed to get container status \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": rpc error: code = NotFound desc = could not find container \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": container with ID starting with 18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.738368 4903 scope.go:117] "RemoveContainer" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.739588 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} err="failed to get container status \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": rpc error: code = NotFound desc = could not find 
container \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": container with ID starting with 271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.739624 4903 scope.go:117] "RemoveContainer" containerID="c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.742358 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c"} err="failed to get container status \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": rpc error: code = NotFound desc = could not find container \"c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c\": container with ID starting with c4447939d076ee9886aa5135a2dae7189ba347d6de35dfefd6064a8138d4973c not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.742379 4903 scope.go:117] "RemoveContainer" containerID="fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.744553 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9"} err="failed to get container status \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": rpc error: code = NotFound desc = could not find container \"fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9\": container with ID starting with fafe3b2dcaf18806767c862ffaf70758fc8a5b050ea74bfcf011b49c5621f8c9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.744591 4903 scope.go:117] "RemoveContainer" containerID="35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745181 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373"} err="failed to get container status \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": rpc error: code = NotFound desc = could not find container \"35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373\": container with ID starting with 35422cd04228e0dba9a40355200b7370644eb84e414493d6a0b1ecdc48d55373 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745203 4903 scope.go:117] "RemoveContainer" containerID="ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745576 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52"} err="failed to get container status \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": rpc error: code = NotFound desc = could not find container \"ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52\": container with ID starting with ac099ea3ebd7e1d6ce9c0dc4b9ae7e278c5dfb8ba676037baeb02deccd638d52 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745617 4903 scope.go:117] "RemoveContainer" containerID="f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745892 4903 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9"} err="failed to get container status \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": rpc error: code = NotFound desc = could not find container \"f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9\": container with ID starting with f69da2585c146bc9a444441ea910f352fa960b9134b70230caa761cc423b17e9 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.745921 4903 scope.go:117] "RemoveContainer" containerID="f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.746098 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb"} err="failed to get container status \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": rpc error: code = NotFound desc = could not find container \"f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb\": container with ID starting with f348679967e52c62752f42b001275a4cee1b022a4336d16421358cb8048332cb not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.746119 4903 scope.go:117] "RemoveContainer" containerID="4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.746348 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3"} err="failed to get container status \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": rpc error: code = NotFound desc = could not find container \"4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3\": container with ID starting with 4e42cad64ff42d36b9963b52437154707a0209d908848872d19cf2ef6c5491d3 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.746368 4903 scope.go:117] "RemoveContainer" containerID="18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.747372 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06"} err="failed to get container status \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": rpc error: code = NotFound desc = could not find container \"18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06\": container with ID starting with 18e7f1cdcd8f50ff37ba19a06b5ef9bb4c86a1190a035247c2ee5d98484f6c06 not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.747428 4903 scope.go:117] "RemoveContainer" containerID="271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.747902 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f"} err="failed to get container status \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": rpc error: code = NotFound desc = could not find container \"271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f\": container with ID starting with 
271b50da5c6c92ea0b2e59313c7c5816f9e97ce440d464e66ac83303b528355f not found: ID does not exist" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.969335 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-bnlxc" event={"ID":"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117","Type":"ContainerStarted","Data":"23d9b32eb772702deec818b8fb12bb9073e7c20b586ebe47643e2c034021bdac"} Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.969401 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-bnlxc" event={"ID":"55cc9b1f-3ec2-47c4-9ab2-1047f4b54117","Type":"ContainerStarted","Data":"f5aefacc36ec4739d63e5277fe99e618bddba34c6c766f8dd03bfc75d5888b3b"} Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.969437 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-bnlxc" Nov 27 00:14:28 crc kubenswrapper[4903]: I1127 00:14:28.997172 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" event={"ID":"b4378de9-e941-4007-b3ae-8471c5ace362","Type":"ContainerStarted","Data":"ff96670e77adda3f539b3ffdd1a0e1ff1c14a75ec82aec649bf276026fd2bc6f"} Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.007207 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5xfj8" event={"ID":"de51f671-d120-440f-8e93-9862dafbca57","Type":"ContainerStarted","Data":"f99722b046373c31b7dc39c164d9b68678b3b5a0e42d0a9b577c249eecb4e511"} Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.007258 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5xfj8" event={"ID":"de51f671-d120-440f-8e93-9862dafbca57","Type":"ContainerStarted","Data":"a17dd83ca80c2b532b1589a76b961217c28e84bc152d91945e79e8e82a40eec4"} Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.007267 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5xfj8" event={"ID":"de51f671-d120-440f-8e93-9862dafbca57","Type":"ContainerStarted","Data":"4e5f47e43d30b160cd97fd25f3bfeec882bc75235d9ed1e8c202049ec7158a08"} Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.007405 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.013265 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-bnlxc" podStartSLOduration=3.013246737 podStartE2EDuration="3.013246737s" podCreationTimestamp="2025-11-27 00:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:14:28.991461685 +0000 UTC m=+6797.681696605" watchObservedRunningTime="2025-11-27 00:14:29.013246737 +0000 UTC m=+6797.703481647" Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.028873 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:14:29 crc kubenswrapper[4903]: E1127 00:14:29.029165 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" 
podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:14:29 crc kubenswrapper[4903]: I1127 00:14:29.039270 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-5xfj8" podStartSLOduration=3.039245261 podStartE2EDuration="3.039245261s" podCreationTimestamp="2025-11-27 00:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:14:29.026280315 +0000 UTC m=+6797.716515225" watchObservedRunningTime="2025-11-27 00:14:29.039245261 +0000 UTC m=+6797.729480171" Nov 27 00:14:31 crc kubenswrapper[4903]: E1127 00:14:31.260481 4903 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.219:36150->38.102.83.219:36831: read tcp 38.102.83.219:36150->38.102.83.219:36831: read: connection reset by peer Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.051011 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-c6fb8fd-gbdfx" Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.154565 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv"] Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.155038 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" podUID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" containerName="webhook-server" containerID="cri-o://67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07" gracePeriod=2 Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.190497 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv"] Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.830540 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.920657 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert\") pod \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.920816 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert\") pod \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.920928 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5smp7\" (UniqueName: \"kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7\") pod \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\" (UID: \"8ebc84bc-9ed7-40ce-a691-cd6c151debc2\") " Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.928902 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7" (OuterVolumeSpecName: "kube-api-access-5smp7") pod "8ebc84bc-9ed7-40ce-a691-cd6c151debc2" (UID: "8ebc84bc-9ed7-40ce-a691-cd6c151debc2"). InnerVolumeSpecName "kube-api-access-5smp7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.938865 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "8ebc84bc-9ed7-40ce-a691-cd6c151debc2" (UID: "8ebc84bc-9ed7-40ce-a691-cd6c151debc2"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:36 crc kubenswrapper[4903]: I1127 00:14:36.938911 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "8ebc84bc-9ed7-40ce-a691-cd6c151debc2" (UID: "8ebc84bc-9ed7-40ce-a691-cd6c151debc2"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.023263 4903 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.023296 4903 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.023306 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5smp7\" (UniqueName: \"kubernetes.io/projected/8ebc84bc-9ed7-40ce-a691-cd6c151debc2-kube-api-access-5smp7\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.168905 4903 generic.go:334] "Generic (PLEG): container finished" podID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" containerID="67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07" exitCode=0 Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.169162 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6476c88b58-5prcv" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.169166 4903 scope.go:117] "RemoveContainer" containerID="67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.177032 4903 generic.go:334] "Generic (PLEG): container finished" podID="e26a694c-d57e-4087-b30b-bca10278c77c" containerID="d100515e97ed195d45c8510b465b0f15a8da1e8caa6bf93c984dbfcfcbf3e017" exitCode=0 Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.177107 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerDied","Data":"d100515e97ed195d45c8510b465b0f15a8da1e8caa6bf93c984dbfcfcbf3e017"} Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.185905 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" event={"ID":"b4378de9-e941-4007-b3ae-8471c5ace362","Type":"ContainerStarted","Data":"9f3b99ade441ffe7aeda636f687ca24e54d63d2f6eecdce8fdc1d54f30f31c9a"} Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.186887 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.244561 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" podStartSLOduration=3.107993897 podStartE2EDuration="11.244543426s" podCreationTimestamp="2025-11-27 00:14:26 +0000 UTC" firstStartedPulling="2025-11-27 00:14:27.969316219 +0000 UTC m=+6796.659551129" lastFinishedPulling="2025-11-27 00:14:36.105865748 +0000 UTC m=+6804.796100658" observedRunningTime="2025-11-27 00:14:37.236549152 +0000 UTC m=+6805.926784072" watchObservedRunningTime="2025-11-27 00:14:37.244543426 +0000 UTC m=+6805.934778336" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.391161 4903 scope.go:117] "RemoveContainer" containerID="67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07" Nov 27 00:14:37 crc kubenswrapper[4903]: E1127 00:14:37.391869 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07\": container with ID starting with 67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07 not found: ID does not exist" containerID="67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07" Nov 27 00:14:37 crc kubenswrapper[4903]: I1127 00:14:37.391939 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07"} err="failed to get container status \"67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07\": rpc error: code = NotFound desc = could not find container \"67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07\": container with ID starting with 67ceebd1907fcf000df3a3c0107ce59a5def7ab0ebf2b740e93c0c493072ca07 not found: ID does not exist" Nov 27 00:14:38 crc kubenswrapper[4903]: I1127 00:14:38.042890 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" path="/var/lib/kubelet/pods/8ebc84bc-9ed7-40ce-a691-cd6c151debc2/volumes" Nov 27 00:14:38 crc kubenswrapper[4903]: I1127 00:14:38.200528 4903 
generic.go:334] "Generic (PLEG): container finished" podID="e26a694c-d57e-4087-b30b-bca10278c77c" containerID="9b2b99f6bc1594fdbd6ab74275b5767ca6d49607e7720aaa50cac6d916a3a338" exitCode=0 Nov 27 00:14:38 crc kubenswrapper[4903]: I1127 00:14:38.200593 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerDied","Data":"9b2b99f6bc1594fdbd6ab74275b5767ca6d49607e7720aaa50cac6d916a3a338"} Nov 27 00:14:39 crc kubenswrapper[4903]: I1127 00:14:39.230275 4903 generic.go:334] "Generic (PLEG): container finished" podID="e26a694c-d57e-4087-b30b-bca10278c77c" containerID="b723d333579996b69fdf324802063eb7569cb5cf73154e07ce2b539a43687532" exitCode=0 Nov 27 00:14:39 crc kubenswrapper[4903]: I1127 00:14:39.230379 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerDied","Data":"b723d333579996b69fdf324802063eb7569cb5cf73154e07ce2b539a43687532"} Nov 27 00:14:40 crc kubenswrapper[4903]: I1127 00:14:40.030163 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:14:40 crc kubenswrapper[4903]: E1127 00:14:40.030965 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:14:40 crc kubenswrapper[4903]: I1127 00:14:40.253729 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"f093dfb56f9ab1183886865cde26eb5044f90a50dd1130c8ae538ce8af9b3024"} Nov 27 00:14:40 crc kubenswrapper[4903]: I1127 00:14:40.253779 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"088b36da6ecf2e3e1e918a285ff70c33721421e594cceccbc493f9e33b02d4e6"} Nov 27 00:14:40 crc kubenswrapper[4903]: I1127 00:14:40.253792 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"5bebc4f9418d6402e9876cff49ecabaf8c7e1b59112b7eeeb01ceb609080f02d"} Nov 27 00:14:40 crc kubenswrapper[4903]: I1127 00:14:40.253804 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"599a6888ce62518d43e982c8fb9de62843fe8c6cd7c18a88149cc29f09405080"} Nov 27 00:14:41 crc kubenswrapper[4903]: I1127 00:14:41.269435 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"b0a92403ca775692401fecf445163178f7b05185ee725e154f6975cdada50ac7"} Nov 27 00:14:41 crc kubenswrapper[4903]: I1127 00:14:41.269801 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-7mw6b" event={"ID":"e26a694c-d57e-4087-b30b-bca10278c77c","Type":"ContainerStarted","Data":"bcca8d0ee85c4838123fdb54557f519d6b051d88abc8a78eb9486f46d42c77bd"} Nov 
27 00:14:41 crc kubenswrapper[4903]: I1127 00:14:41.269822 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:41 crc kubenswrapper[4903]: I1127 00:14:41.297109 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-7mw6b" podStartSLOduration=6.802994837 podStartE2EDuration="15.297083744s" podCreationTimestamp="2025-11-27 00:14:26 +0000 UTC" firstStartedPulling="2025-11-27 00:14:27.642952674 +0000 UTC m=+6796.333187574" lastFinishedPulling="2025-11-27 00:14:36.137041571 +0000 UTC m=+6804.827276481" observedRunningTime="2025-11-27 00:14:41.291331841 +0000 UTC m=+6809.981566771" watchObservedRunningTime="2025-11-27 00:14:41.297083744 +0000 UTC m=+6809.987318644" Nov 27 00:14:42 crc kubenswrapper[4903]: I1127 00:14:42.351782 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:42 crc kubenswrapper[4903]: I1127 00:14:42.392414 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.333177 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-m5t9q" Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.389824 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-bnlxc" Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.396061 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-5xfj8" Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.562419 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.562652 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" podUID="5317f83c-9fcf-4df1-9823-bb92767545a9" containerName="frr-k8s-webhook-server" containerID="cri-o://57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5" gracePeriod=10 Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.678543 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hw6vb"] Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.678813 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/controller-6c7b4b5f48-hw6vb" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="controller" containerID="cri-o://03af32a426adce86fe49bf7537e8f8c458325eb493f6e86b8aff9d514beb27a1" gracePeriod=2 Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.678947 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/controller-6c7b4b5f48-hw6vb" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="kube-rbac-proxy" containerID="cri-o://7e051cbc5ac3d1c70b3a70aebdccc89c6dcd22b494cee3a4023bee6490ebd082" gracePeriod=2 Nov 27 00:14:47 crc kubenswrapper[4903]: I1127 00:14:47.699155 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hw6vb"] Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.389638 4903 generic.go:334] "Generic (PLEG): container finished" podID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerID="7e051cbc5ac3d1c70b3a70aebdccc89c6dcd22b494cee3a4023bee6490ebd082" 
exitCode=0 Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.390312 4903 generic.go:334] "Generic (PLEG): container finished" podID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerID="03af32a426adce86fe49bf7537e8f8c458325eb493f6e86b8aff9d514beb27a1" exitCode=0 Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.390391 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee23e574e8127d561a627194e52d7c6fc6367e8565da86e4a7fe44ce29fa39e7" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.392878 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.393443 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.394378 4903 generic.go:334] "Generic (PLEG): container finished" podID="5317f83c-9fcf-4df1-9823-bb92767545a9" containerID="57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5" exitCode=0 Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.394420 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" event={"ID":"5317f83c-9fcf-4df1-9823-bb92767545a9","Type":"ContainerDied","Data":"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5"} Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.394455 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" event={"ID":"5317f83c-9fcf-4df1-9823-bb92767545a9","Type":"ContainerDied","Data":"516b00221c2783a43ca6d366a243e6d12331651249845a92a48f8b1ead47255d"} Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.394475 4903 scope.go:117] "RemoveContainer" containerID="57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.430126 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert\") pod \"a9e12d32-ef72-446c-b317-8d00a90a651b\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.430172 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert\") pod \"5317f83c-9fcf-4df1-9823-bb92767545a9\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.430284 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtm9l\" (UniqueName: \"kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l\") pod \"5317f83c-9fcf-4df1-9823-bb92767545a9\" (UID: \"5317f83c-9fcf-4df1-9823-bb92767545a9\") " Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.430324 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48h6x\" (UniqueName: \"kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x\") pod \"a9e12d32-ef72-446c-b317-8d00a90a651b\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.430378 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs\") pod \"a9e12d32-ef72-446c-b317-8d00a90a651b\" (UID: \"a9e12d32-ef72-446c-b317-8d00a90a651b\") " Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.439962 4903 scope.go:117] "RemoveContainer" containerID="57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5" Nov 27 00:14:48 crc kubenswrapper[4903]: E1127 00:14:48.441244 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5\": container with ID starting with 57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5 not found: ID does not exist" containerID="57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.441314 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5"} err="failed to get container status \"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5\": rpc error: code = NotFound desc = could not find container \"57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5\": container with ID starting with 57a5cd55b5ebe258f7302f985442c228c38cc7a2f24733e434a5b27c40630bf5 not found: ID does not exist" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.443090 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x" (OuterVolumeSpecName: "kube-api-access-48h6x") pod "a9e12d32-ef72-446c-b317-8d00a90a651b" (UID: "a9e12d32-ef72-446c-b317-8d00a90a651b"). InnerVolumeSpecName "kube-api-access-48h6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.443380 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert" (OuterVolumeSpecName: "cert") pod "5317f83c-9fcf-4df1-9823-bb92767545a9" (UID: "5317f83c-9fcf-4df1-9823-bb92767545a9"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.447487 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "a9e12d32-ef72-446c-b317-8d00a90a651b" (UID: "a9e12d32-ef72-446c-b317-8d00a90a651b"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.447601 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert" (OuterVolumeSpecName: "cert") pod "a9e12d32-ef72-446c-b317-8d00a90a651b" (UID: "a9e12d32-ef72-446c-b317-8d00a90a651b"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.447659 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l" (OuterVolumeSpecName: "kube-api-access-qtm9l") pod "5317f83c-9fcf-4df1-9823-bb92767545a9" (UID: "5317f83c-9fcf-4df1-9823-bb92767545a9"). InnerVolumeSpecName "kube-api-access-qtm9l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.534337 4903 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.534379 4903 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a9e12d32-ef72-446c-b317-8d00a90a651b-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.534395 4903 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5317f83c-9fcf-4df1-9823-bb92767545a9-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.534409 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtm9l\" (UniqueName: \"kubernetes.io/projected/5317f83c-9fcf-4df1-9823-bb92767545a9-kube-api-access-qtm9l\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:48 crc kubenswrapper[4903]: I1127 00:14:48.534424 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48h6x\" (UniqueName: \"kubernetes.io/projected/a9e12d32-ef72-446c-b317-8d00a90a651b-kube-api-access-48h6x\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:49 crc kubenswrapper[4903]: I1127 00:14:49.408532 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hw6vb" Nov 27 00:14:49 crc kubenswrapper[4903]: I1127 00:14:49.408552 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m" Nov 27 00:14:49 crc kubenswrapper[4903]: I1127 00:14:49.457489 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 27 00:14:49 crc kubenswrapper[4903]: I1127 00:14:49.477730 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-s9t6m"] Nov 27 00:14:50 crc kubenswrapper[4903]: I1127 00:14:50.050988 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5317f83c-9fcf-4df1-9823-bb92767545a9" path="/var/lib/kubelet/pods/5317f83c-9fcf-4df1-9823-bb92767545a9/volumes" Nov 27 00:14:50 crc kubenswrapper[4903]: I1127 00:14:50.052025 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" path="/var/lib/kubelet/pods/a9e12d32-ef72-446c-b317-8d00a90a651b/volumes" Nov 27 00:14:54 crc kubenswrapper[4903]: I1127 00:14:54.030733 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:14:54 crc kubenswrapper[4903]: E1127 00:14:54.031806 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:14:55 crc kubenswrapper[4903]: I1127 00:14:55.840932 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5cf6d5447f-sb6mg" Nov 27 00:14:55 crc kubenswrapper[4903]: 
I1127 00:14:55.917826 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:14:55 crc kubenswrapper[4903]: I1127 00:14:55.918381 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" podUID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" containerName="manager" containerID="cri-o://1efcfc641b7ff7199b6027116cf717ef5cdaced6e59ab55a37c130db14601542" gracePeriod=10 Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.496356 4903 generic.go:334] "Generic (PLEG): container finished" podID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" containerID="1efcfc641b7ff7199b6027116cf717ef5cdaced6e59ab55a37c130db14601542" exitCode=0 Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.496412 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" event={"ID":"d4705259-0dd2-4374-b6be-3ac6e57ae8f6","Type":"ContainerDied","Data":"1efcfc641b7ff7199b6027116cf717ef5cdaced6e59ab55a37c130db14601542"} Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.496710 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" event={"ID":"d4705259-0dd2-4374-b6be-3ac6e57ae8f6","Type":"ContainerDied","Data":"922bf28b4f542a7b3a2ffc2c832ef17eacfae85c1b7b6a6aeb5e8b7f9eb1127f"} Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.496727 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="922bf28b4f542a7b3a2ffc2c832ef17eacfae85c1b7b6a6aeb5e8b7f9eb1127f" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.554576 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.650511 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2xjh\" (UniqueName: \"kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh\") pod \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.650583 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert\") pod \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.650627 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert\") pod \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\" (UID: \"d4705259-0dd2-4374-b6be-3ac6e57ae8f6\") " Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.657624 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh" (OuterVolumeSpecName: "kube-api-access-m2xjh") pod "d4705259-0dd2-4374-b6be-3ac6e57ae8f6" (UID: "d4705259-0dd2-4374-b6be-3ac6e57ae8f6"). InnerVolumeSpecName "kube-api-access-m2xjh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.657636 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "d4705259-0dd2-4374-b6be-3ac6e57ae8f6" (UID: "d4705259-0dd2-4374-b6be-3ac6e57ae8f6"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.657715 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "d4705259-0dd2-4374-b6be-3ac6e57ae8f6" (UID: "d4705259-0dd2-4374-b6be-3ac6e57ae8f6"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.754379 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2xjh\" (UniqueName: \"kubernetes.io/projected/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-kube-api-access-m2xjh\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.754456 4903 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:56 crc kubenswrapper[4903]: I1127 00:14:56.754474 4903 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d4705259-0dd2-4374-b6be-3ac6e57ae8f6-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 27 00:14:57 crc kubenswrapper[4903]: I1127 00:14:57.355267 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-7mw6b" Nov 27 00:14:57 crc kubenswrapper[4903]: I1127 00:14:57.508477 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g" Nov 27 00:14:57 crc kubenswrapper[4903]: I1127 00:14:57.556254 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:14:57 crc kubenswrapper[4903]: I1127 00:14:57.571493 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["metallb-system/metallb-operator-controller-manager-55f4fb8967-nxn9g"] Nov 27 00:14:58 crc kubenswrapper[4903]: I1127 00:14:58.044274 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" path="/var/lib/kubelet/pods/d4705259-0dd2-4374-b6be-3ac6e57ae8f6/volumes" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.176686 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx"] Nov 27 00:15:00 crc kubenswrapper[4903]: E1127 00:15:00.177907 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" containerName="manager" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.177926 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" containerName="manager" Nov 27 00:15:00 crc kubenswrapper[4903]: E1127 00:15:00.177945 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="controller" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.177953 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="controller" Nov 27 00:15:00 crc kubenswrapper[4903]: E1127 00:15:00.177977 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5317f83c-9fcf-4df1-9823-bb92767545a9" containerName="frr-k8s-webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.177985 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="5317f83c-9fcf-4df1-9823-bb92767545a9" containerName="frr-k8s-webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: E1127 00:15:00.177999 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="kube-rbac-proxy" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178007 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="kube-rbac-proxy" Nov 27 00:15:00 crc kubenswrapper[4903]: E1127 00:15:00.178035 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" containerName="webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178042 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" containerName="webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178312 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="kube-rbac-proxy" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178340 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ebc84bc-9ed7-40ce-a691-cd6c151debc2" containerName="webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178364 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4705259-0dd2-4374-b6be-3ac6e57ae8f6" containerName="manager" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 
00:15:00.178393 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e12d32-ef72-446c-b317-8d00a90a651b" containerName="controller" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.178407 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="5317f83c-9fcf-4df1-9823-bb92767545a9" containerName="frr-k8s-webhook-server" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.179430 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.186176 4903 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.188888 4903 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.203857 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx"] Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.235994 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.236055 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.236173 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlf6j\" (UniqueName: \"kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.337918 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.338013 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlf6j\" (UniqueName: \"kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.338195 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.339320 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.352884 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.357410 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlf6j\" (UniqueName: \"kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j\") pod \"collect-profiles-29403375-tgbfx\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:00 crc kubenswrapper[4903]: I1127 00:15:00.511362 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:01 crc kubenswrapper[4903]: I1127 00:15:01.083988 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx"] Nov 27 00:15:01 crc kubenswrapper[4903]: I1127 00:15:01.579910 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" event={"ID":"f2515a80-e4eb-44de-8bcc-22df2dfd772f","Type":"ContainerStarted","Data":"e74528586d46bd1a9d43572fc31f3203997e637fd68a53786615549f78e9507f"} Nov 27 00:15:01 crc kubenswrapper[4903]: I1127 00:15:01.580297 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" event={"ID":"f2515a80-e4eb-44de-8bcc-22df2dfd772f","Type":"ContainerStarted","Data":"a0a531028c8dd6c8fa48dece6e434bc666656b99893a1d2f58e5be5eea544d40"} Nov 27 00:15:01 crc kubenswrapper[4903]: I1127 00:15:01.598160 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" podStartSLOduration=1.598131974 podStartE2EDuration="1.598131974s" podCreationTimestamp="2025-11-27 00:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-27 00:15:01.595945916 +0000 UTC m=+6830.286180866" watchObservedRunningTime="2025-11-27 00:15:01.598131974 +0000 UTC m=+6830.288366924" Nov 27 00:15:02 crc kubenswrapper[4903]: I1127 00:15:02.695531 4903 generic.go:334] "Generic (PLEG): container finished" podID="f2515a80-e4eb-44de-8bcc-22df2dfd772f" containerID="e74528586d46bd1a9d43572fc31f3203997e637fd68a53786615549f78e9507f" exitCode=0 Nov 27 00:15:02 crc kubenswrapper[4903]: I1127 00:15:02.695610 4903 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" event={"ID":"f2515a80-e4eb-44de-8bcc-22df2dfd772f","Type":"ContainerDied","Data":"e74528586d46bd1a9d43572fc31f3203997e637fd68a53786615549f78e9507f"} Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.236010 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.354133 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume\") pod \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.354257 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlf6j\" (UniqueName: \"kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j\") pod \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.354445 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume\") pod \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\" (UID: \"f2515a80-e4eb-44de-8bcc-22df2dfd772f\") " Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.355042 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume" (OuterVolumeSpecName: "config-volume") pod "f2515a80-e4eb-44de-8bcc-22df2dfd772f" (UID: "f2515a80-e4eb-44de-8bcc-22df2dfd772f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.361393 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j" (OuterVolumeSpecName: "kube-api-access-jlf6j") pod "f2515a80-e4eb-44de-8bcc-22df2dfd772f" (UID: "f2515a80-e4eb-44de-8bcc-22df2dfd772f"). InnerVolumeSpecName "kube-api-access-jlf6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.361578 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f2515a80-e4eb-44de-8bcc-22df2dfd772f" (UID: "f2515a80-e4eb-44de-8bcc-22df2dfd772f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.457427 4903 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f2515a80-e4eb-44de-8bcc-22df2dfd772f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.457465 4903 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f2515a80-e4eb-44de-8bcc-22df2dfd772f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.457478 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlf6j\" (UniqueName: \"kubernetes.io/projected/f2515a80-e4eb-44de-8bcc-22df2dfd772f-kube-api-access-jlf6j\") on node \"crc\" DevicePath \"\"" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.702572 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr"] Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.718970 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403330-c78fr"] Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.723974 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.723944 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403375-tgbfx" event={"ID":"f2515a80-e4eb-44de-8bcc-22df2dfd772f","Type":"ContainerDied","Data":"a0a531028c8dd6c8fa48dece6e434bc666656b99893a1d2f58e5be5eea544d40"} Nov 27 00:15:04 crc kubenswrapper[4903]: I1127 00:15:04.724293 4903 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0a531028c8dd6c8fa48dece6e434bc666656b99893a1d2f58e5be5eea544d40" Nov 27 00:15:05 crc kubenswrapper[4903]: I1127 00:15:05.028183 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:15:05 crc kubenswrapper[4903]: E1127 00:15:05.028547 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:15:06 crc kubenswrapper[4903]: I1127 00:15:06.054645 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c8bbb7-0e41-4734-a723-33bf901ae7ea" path="/var/lib/kubelet/pods/54c8bbb7-0e41-4734-a723-33bf901ae7ea/volumes" Nov 27 00:15:16 crc kubenswrapper[4903]: I1127 00:15:16.029778 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:15:16 crc kubenswrapper[4903]: E1127 00:15:16.031091 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:15:29 crc kubenswrapper[4903]: I1127 00:15:29.029062 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:15:29 crc kubenswrapper[4903]: E1127 00:15:29.029933 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:15:29 crc kubenswrapper[4903]: I1127 00:15:29.429797 4903 scope.go:117] "RemoveContainer" containerID="7e051cbc5ac3d1c70b3a70aebdccc89c6dcd22b494cee3a4023bee6490ebd082" Nov 27 00:15:29 crc kubenswrapper[4903]: I1127 00:15:29.457641 4903 scope.go:117] "RemoveContainer" containerID="01cbb4f88242d44d8a23e5730b3a62482d9e438f77b4ebc28e168909a47cf853" Nov 27 00:15:29 crc kubenswrapper[4903]: I1127 00:15:29.487668 4903 scope.go:117] "RemoveContainer" containerID="03af32a426adce86fe49bf7537e8f8c458325eb493f6e86b8aff9d514beb27a1" Nov 27 00:15:44 crc kubenswrapper[4903]: I1127 00:15:44.028589 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:15:44 crc kubenswrapper[4903]: E1127 00:15:44.029493 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:15:57 crc kubenswrapper[4903]: I1127 00:15:57.028846 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:15:57 crc kubenswrapper[4903]: E1127 00:15:57.029947 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:16:04 crc kubenswrapper[4903]: I1127 00:16:04.597651 4903 generic.go:334] "Generic (PLEG): container finished" podID="46129129-7d0f-416e-bdc2-500691655897" containerID="e7300a453dc8b7dd11e5f03220802f58de7bc86cf5780ed2ae986989bb9b849c" exitCode=0 Nov 27 00:16:04 crc kubenswrapper[4903]: I1127 00:16:04.597758 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-tczvh/must-gather-855zj" event={"ID":"46129129-7d0f-416e-bdc2-500691655897","Type":"ContainerDied","Data":"e7300a453dc8b7dd11e5f03220802f58de7bc86cf5780ed2ae986989bb9b849c"} Nov 27 00:16:04 crc kubenswrapper[4903]: I1127 00:16:04.598940 4903 scope.go:117] "RemoveContainer" containerID="e7300a453dc8b7dd11e5f03220802f58de7bc86cf5780ed2ae986989bb9b849c" Nov 27 00:16:04 crc kubenswrapper[4903]: I1127 00:16:04.768521 4903 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-must-gather-tczvh_must-gather-855zj_46129129-7d0f-416e-bdc2-500691655897/gather/0.log" Nov 27 00:16:12 crc kubenswrapper[4903]: I1127 00:16:12.039275 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:16:12 crc kubenswrapper[4903]: E1127 00:16:12.040421 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.443911 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-tczvh/must-gather-855zj"] Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.444743 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-tczvh/must-gather-855zj" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="copy" containerID="cri-o://4c9316f66d88b024ff84891899d7ceceac26831eb2c8ba8e1be8f508497ea4e0" gracePeriod=2 Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.457597 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-tczvh/must-gather-855zj"] Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.742213 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tczvh_must-gather-855zj_46129129-7d0f-416e-bdc2-500691655897/copy/0.log" Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.742973 4903 generic.go:334] "Generic (PLEG): container finished" podID="46129129-7d0f-416e-bdc2-500691655897" containerID="4c9316f66d88b024ff84891899d7ceceac26831eb2c8ba8e1be8f508497ea4e0" exitCode=143 Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.956307 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tczvh_must-gather-855zj_46129129-7d0f-416e-bdc2-500691655897/copy/0.log" Nov 27 00:16:16 crc kubenswrapper[4903]: I1127 00:16:16.956929 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.105641 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output\") pod \"46129129-7d0f-416e-bdc2-500691655897\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.105896 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpwfk\" (UniqueName: \"kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk\") pod \"46129129-7d0f-416e-bdc2-500691655897\" (UID: \"46129129-7d0f-416e-bdc2-500691655897\") " Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.128506 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk" (OuterVolumeSpecName: "kube-api-access-hpwfk") pod "46129129-7d0f-416e-bdc2-500691655897" (UID: "46129129-7d0f-416e-bdc2-500691655897"). InnerVolumeSpecName "kube-api-access-hpwfk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.208936 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpwfk\" (UniqueName: \"kubernetes.io/projected/46129129-7d0f-416e-bdc2-500691655897-kube-api-access-hpwfk\") on node \"crc\" DevicePath \"\"" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.307805 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "46129129-7d0f-416e-bdc2-500691655897" (UID: "46129129-7d0f-416e-bdc2-500691655897"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.311317 4903 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/46129129-7d0f-416e-bdc2-500691655897-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.754516 4903 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-tczvh_must-gather-855zj_46129129-7d0f-416e-bdc2-500691655897/copy/0.log" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.755120 4903 scope.go:117] "RemoveContainer" containerID="4c9316f66d88b024ff84891899d7ceceac26831eb2c8ba8e1be8f508497ea4e0" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.755150 4903 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-tczvh/must-gather-855zj" Nov 27 00:16:17 crc kubenswrapper[4903]: I1127 00:16:17.776848 4903 scope.go:117] "RemoveContainer" containerID="e7300a453dc8b7dd11e5f03220802f58de7bc86cf5780ed2ae986989bb9b849c" Nov 27 00:16:18 crc kubenswrapper[4903]: I1127 00:16:18.042189 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46129129-7d0f-416e-bdc2-500691655897" path="/var/lib/kubelet/pods/46129129-7d0f-416e-bdc2-500691655897/volumes" Nov 27 00:16:24 crc kubenswrapper[4903]: I1127 00:16:24.028868 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:16:24 crc kubenswrapper[4903]: E1127 00:16:24.031026 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.204460 4903 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:27 crc kubenswrapper[4903]: E1127 00:16:27.206484 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="copy" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.206590 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="copy" Nov 27 00:16:27 crc kubenswrapper[4903]: E1127 00:16:27.206712 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="gather" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 
00:16:27.206798 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="gather" Nov 27 00:16:27 crc kubenswrapper[4903]: E1127 00:16:27.206947 4903 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2515a80-e4eb-44de-8bcc-22df2dfd772f" containerName="collect-profiles" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.207033 4903 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2515a80-e4eb-44de-8bcc-22df2dfd772f" containerName="collect-profiles" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.207431 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2515a80-e4eb-44de-8bcc-22df2dfd772f" containerName="collect-profiles" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.207596 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="copy" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.207715 4903 memory_manager.go:354] "RemoveStaleState removing state" podUID="46129129-7d0f-416e-bdc2-500691655897" containerName="gather" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.209796 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.219920 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.354029 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcmsh\" (UniqueName: \"kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.354105 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.354143 4903 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.456726 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcmsh\" (UniqueName: \"kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.456786 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc 
kubenswrapper[4903]: I1127 00:16:27.456814 4903 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.457309 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.457395 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.474674 4903 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcmsh\" (UniqueName: \"kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh\") pod \"community-operators-2xvqq\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:27 crc kubenswrapper[4903]: I1127 00:16:27.553979 4903 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:28 crc kubenswrapper[4903]: I1127 00:16:28.071426 4903 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:28 crc kubenswrapper[4903]: I1127 00:16:28.894801 4903 generic.go:334] "Generic (PLEG): container finished" podID="5af14c60-241c-45b8-b749-a20d62645433" containerID="2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231" exitCode=0 Nov 27 00:16:28 crc kubenswrapper[4903]: I1127 00:16:28.894890 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerDied","Data":"2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231"} Nov 27 00:16:28 crc kubenswrapper[4903]: I1127 00:16:28.895122 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerStarted","Data":"a40cd4adb00345f233a17fc53846d4aeb4fb2b773a0285c965635ab95ed302fa"} Nov 27 00:16:28 crc kubenswrapper[4903]: I1127 00:16:28.898725 4903 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 27 00:16:29 crc kubenswrapper[4903]: I1127 00:16:29.666106 4903 scope.go:117] "RemoveContainer" containerID="05c89e3b3e1feba9f68f1716bd8bff9c48530967306886d3600016a32b954c96" Nov 27 00:16:30 crc kubenswrapper[4903]: I1127 00:16:30.919088 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerStarted","Data":"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5"} Nov 27 00:16:31 crc kubenswrapper[4903]: I1127 00:16:31.932975 4903 
generic.go:334] "Generic (PLEG): container finished" podID="5af14c60-241c-45b8-b749-a20d62645433" containerID="5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5" exitCode=0 Nov 27 00:16:31 crc kubenswrapper[4903]: I1127 00:16:31.933036 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerDied","Data":"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5"} Nov 27 00:16:32 crc kubenswrapper[4903]: I1127 00:16:32.949708 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerStarted","Data":"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080"} Nov 27 00:16:32 crc kubenswrapper[4903]: I1127 00:16:32.979207 4903 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2xvqq" podStartSLOduration=2.441527259 podStartE2EDuration="5.979184886s" podCreationTimestamp="2025-11-27 00:16:27 +0000 UTC" firstStartedPulling="2025-11-27 00:16:28.89677385 +0000 UTC m=+6917.587008760" lastFinishedPulling="2025-11-27 00:16:32.434431477 +0000 UTC m=+6921.124666387" observedRunningTime="2025-11-27 00:16:32.970000019 +0000 UTC m=+6921.660234929" watchObservedRunningTime="2025-11-27 00:16:32.979184886 +0000 UTC m=+6921.669419796" Nov 27 00:16:37 crc kubenswrapper[4903]: I1127 00:16:37.028359 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:16:37 crc kubenswrapper[4903]: E1127 00:16:37.029132 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:16:37 crc kubenswrapper[4903]: I1127 00:16:37.555068 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:37 crc kubenswrapper[4903]: I1127 00:16:37.555400 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:37 crc kubenswrapper[4903]: I1127 00:16:37.604991 4903 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:38 crc kubenswrapper[4903]: I1127 00:16:38.058056 4903 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:38 crc kubenswrapper[4903]: I1127 00:16:38.104399 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.023285 4903 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2xvqq" podUID="5af14c60-241c-45b8-b749-a20d62645433" containerName="registry-server" containerID="cri-o://8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080" gracePeriod=2 Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.592208 4903 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.686616 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcmsh\" (UniqueName: \"kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh\") pod \"5af14c60-241c-45b8-b749-a20d62645433\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.686755 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content\") pod \"5af14c60-241c-45b8-b749-a20d62645433\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.686787 4903 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities\") pod \"5af14c60-241c-45b8-b749-a20d62645433\" (UID: \"5af14c60-241c-45b8-b749-a20d62645433\") " Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.689250 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities" (OuterVolumeSpecName: "utilities") pod "5af14c60-241c-45b8-b749-a20d62645433" (UID: "5af14c60-241c-45b8-b749-a20d62645433"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.699808 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh" (OuterVolumeSpecName: "kube-api-access-hcmsh") pod "5af14c60-241c-45b8-b749-a20d62645433" (UID: "5af14c60-241c-45b8-b749-a20d62645433"). InnerVolumeSpecName "kube-api-access-hcmsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.751330 4903 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5af14c60-241c-45b8-b749-a20d62645433" (UID: "5af14c60-241c-45b8-b749-a20d62645433"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.789605 4903 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-utilities\") on node \"crc\" DevicePath \"\"" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.789679 4903 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcmsh\" (UniqueName: \"kubernetes.io/projected/5af14c60-241c-45b8-b749-a20d62645433-kube-api-access-hcmsh\") on node \"crc\" DevicePath \"\"" Nov 27 00:16:40 crc kubenswrapper[4903]: I1127 00:16:40.789730 4903 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af14c60-241c-45b8-b749-a20d62645433-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.041052 4903 generic.go:334] "Generic (PLEG): container finished" podID="5af14c60-241c-45b8-b749-a20d62645433" containerID="8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080" exitCode=0 Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.041099 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerDied","Data":"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080"} Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.041124 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2xvqq" event={"ID":"5af14c60-241c-45b8-b749-a20d62645433","Type":"ContainerDied","Data":"a40cd4adb00345f233a17fc53846d4aeb4fb2b773a0285c965635ab95ed302fa"} Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.041140 4903 scope.go:117] "RemoveContainer" containerID="8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.041196 4903 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2xvqq" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.080157 4903 scope.go:117] "RemoveContainer" containerID="5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.100797 4903 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.113819 4903 scope.go:117] "RemoveContainer" containerID="2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.119745 4903 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2xvqq"] Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.170078 4903 scope.go:117] "RemoveContainer" containerID="8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080" Nov 27 00:16:41 crc kubenswrapper[4903]: E1127 00:16:41.170863 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080\": container with ID starting with 8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080 not found: ID does not exist" containerID="8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.171008 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080"} err="failed to get container status \"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080\": rpc error: code = NotFound desc = could not find container \"8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080\": container with ID starting with 8075dbe6ea4d910a16bbb6431e57852ef068d03b071ab73c71dd3766844f6080 not found: ID does not exist" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.171118 4903 scope.go:117] "RemoveContainer" containerID="5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5" Nov 27 00:16:41 crc kubenswrapper[4903]: E1127 00:16:41.171517 4903 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5\": container with ID starting with 5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5 not found: ID does not exist" containerID="5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.171621 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5"} err="failed to get container status \"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5\": rpc error: code = NotFound desc = could not find container \"5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5\": container with ID starting with 5523107e4eaec95ac1ca761232719aaedbac5f190b20f8157d452c44cd1c36a5 not found: ID does not exist" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.171784 4903 scope.go:117] "RemoveContainer" containerID="2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231" Nov 27 00:16:41 crc kubenswrapper[4903]: E1127 00:16:41.172155 4903 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231\": container with ID starting with 2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231 not found: ID does not exist" containerID="2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231" Nov 27 00:16:41 crc kubenswrapper[4903]: I1127 00:16:41.172202 4903 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231"} err="failed to get container status \"2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231\": rpc error: code = NotFound desc = could not find container \"2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231\": container with ID starting with 2ce017dc9768d92f2329682d01ebdd9a687cee0ed1a139d349169be2dbb36231 not found: ID does not exist" Nov 27 00:16:42 crc kubenswrapper[4903]: I1127 00:16:42.043057 4903 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5af14c60-241c-45b8-b749-a20d62645433" path="/var/lib/kubelet/pods/5af14c60-241c-45b8-b749-a20d62645433/volumes" Nov 27 00:16:51 crc kubenswrapper[4903]: I1127 00:16:51.028664 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:16:51 crc kubenswrapper[4903]: E1127 00:16:51.029553 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:17:04 crc kubenswrapper[4903]: I1127 00:17:04.029915 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:17:04 crc kubenswrapper[4903]: E1127 00:17:04.031546 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:17:17 crc kubenswrapper[4903]: I1127 00:17:17.039164 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:17:17 crc kubenswrapper[4903]: E1127 00:17:17.043197 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:17:31 crc kubenswrapper[4903]: I1127 00:17:31.029116 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:17:31 crc kubenswrapper[4903]: E1127 00:17:31.030308 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:17:45 crc kubenswrapper[4903]: I1127 00:17:45.029223 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:17:45 crc kubenswrapper[4903]: E1127 00:17:45.030000 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:17:56 crc kubenswrapper[4903]: I1127 00:17:56.029563 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:17:56 crc kubenswrapper[4903]: E1127 00:17:56.030457 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:18:09 crc kubenswrapper[4903]: I1127 00:18:09.029082 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:18:09 crc kubenswrapper[4903]: E1127 00:18:09.030086 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:18:20 crc kubenswrapper[4903]: I1127 00:18:20.028471 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:18:20 crc kubenswrapper[4903]: E1127 00:18:20.029426 4903 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-wjwph_openshift-machine-config-operator(232b7aad-b4bd-495a-a411-0cfd48fa372c)\"" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" podUID="232b7aad-b4bd-495a-a411-0cfd48fa372c" Nov 27 00:18:35 crc kubenswrapper[4903]: I1127 00:18:35.029670 4903 scope.go:117] "RemoveContainer" containerID="c94c27075d9b691dede6e2bdca4738ac0ca2bb05f39b1b845169635db93d8dc5" Nov 27 00:18:35 crc kubenswrapper[4903]: I1127 00:18:35.856663 4903 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-wjwph" event={"ID":"232b7aad-b4bd-495a-a411-0cfd48fa372c","Type":"ContainerStarted","Data":"4114a3c5265553c71ff4d430499d9ca4611b83352b5fad150c1af0f75ab188f4"} Nov 27 
Nov 27 00:19:29 crc kubenswrapper[4903]: I1127 00:19:29.839102 4903 scope.go:117] "RemoveContainer" containerID="1efcfc641b7ff7199b6027116cf717ef5cdaced6e59ab55a37c130db14601542"